]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-3.0.4-201108300001.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.4-201108300001.patch
1 diff -urNp linux-3.0.4/arch/alpha/include/asm/elf.h linux-3.0.4/arch/alpha/include/asm/elf.h
2 --- linux-3.0.4/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3 +++ linux-3.0.4/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-3.0.4/arch/alpha/include/asm/pgtable.h linux-3.0.4/arch/alpha/include/asm/pgtable.h
19 --- linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20 +++ linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-3.0.4/arch/alpha/kernel/module.c linux-3.0.4/arch/alpha/kernel/module.c
40 --- linux-3.0.4/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41 +++ linux-3.0.4/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-3.0.4/arch/alpha/kernel/osf_sys.c linux-3.0.4/arch/alpha/kernel/osf_sys.c
52 --- linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53 +++ linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-3.0.4/arch/alpha/mm/fault.c linux-3.0.4/arch/alpha/mm/fault.c
86 --- linux-3.0.4/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87 +++ linux-3.0.4/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-3.0.4/arch/arm/include/asm/elf.h linux-3.0.4/arch/arm/include/asm/elf.h
245 --- linux-3.0.4/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246 +++ linux-3.0.4/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267 -struct mm_struct;
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
270 -
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-3.0.4/arch/arm/include/asm/kmap_types.h linux-3.0.4/arch/arm/include/asm/kmap_types.h
275 --- linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276 +++ linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281 + KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285 diff -urNp linux-3.0.4/arch/arm/include/asm/uaccess.h linux-3.0.4/arch/arm/include/asm/uaccess.h
286 --- linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287 +++ linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288 @@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
293 +
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305 +
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307 +{
308 + if (!__builtin_constant_p(n))
309 + check_object_size(to, n, false);
310 + return ___copy_from_user(to, from, n);
311 +}
312 +
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314 +{
315 + if (!__builtin_constant_p(n))
316 + check_object_size(from, n, true);
317 + return ___copy_to_user(to, from, n);
318 +}
319 +
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327 + if ((long)n < 0)
328 + return n;
329 +
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337 + if ((long)n < 0)
338 + return n;
339 +
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343 diff -urNp linux-3.0.4/arch/arm/kernel/armksyms.c linux-3.0.4/arch/arm/kernel/armksyms.c
344 --- linux-3.0.4/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345 +++ linux-3.0.4/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-3.0.4/arch/arm/kernel/process.c linux-3.0.4/arch/arm/kernel/process.c
358 --- linux-3.0.4/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359 +++ linux-3.0.4/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360 @@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364 -#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
373 -{
374 - unsigned long range_end = mm->brk + 0x02000000;
375 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376 -}
377 -
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381 diff -urNp linux-3.0.4/arch/arm/kernel/traps.c linux-3.0.4/arch/arm/kernel/traps.c
382 --- linux-3.0.4/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383 +++ linux-3.0.4/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384 @@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388 +extern void gr_handle_kernel_exploit(void);
389 +
390 /*
391 * This function is protected against re-entrancy.
392 */
393 @@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397 +
398 + gr_handle_kernel_exploit();
399 +
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403 diff -urNp linux-3.0.4/arch/arm/lib/copy_from_user.S linux-3.0.4/arch/arm/lib/copy_from_user.S
404 --- linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405 +++ linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406 @@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410 - * size_t __copy_from_user(void *to, const void *from, size_t n)
411 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415 @@ -84,11 +84,11 @@
416
417 .text
418
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429 diff -urNp linux-3.0.4/arch/arm/lib/copy_to_user.S linux-3.0.4/arch/arm/lib/copy_to_user.S
430 --- linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431 +++ linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432 @@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436 - * size_t __copy_to_user(void *to, const void *from, size_t n)
437 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441 @@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455 diff -urNp linux-3.0.4/arch/arm/lib/uaccess.S linux-3.0.4/arch/arm/lib/uaccess.S
456 --- linux-3.0.4/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457 +++ linux-3.0.4/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458 @@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511 diff -urNp linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513 +++ linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523 diff -urNp linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525 +++ linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535 diff -urNp linux-3.0.4/arch/arm/mm/fault.c linux-3.0.4/arch/arm/mm/fault.c
536 --- linux-3.0.4/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537 +++ linux-3.0.4/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 + if (fsr & FSR_LNX_PF) {
544 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 + do_group_exit(SIGKILL);
546 + }
547 +#endif
548 +
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
558 +{
559 + long i;
560 +
561 + printk(KERN_ERR "PAX: bytes at PC: ");
562 + for (i = 0; i < 20; i++) {
563 + unsigned char c;
564 + if (get_user(c, (__force unsigned char __user *)pc+i))
565 + printk(KERN_CONT "?? ");
566 + else
567 + printk(KERN_CONT "%02x ", c);
568 + }
569 + printk("\n");
570 +
571 + printk(KERN_ERR "PAX: bytes at SP-4: ");
572 + for (i = -1; i < 20; i++) {
573 + unsigned long c;
574 + if (get_user(c, (__force unsigned long __user *)sp+i))
575 + printk(KERN_CONT "???????? ");
576 + else
577 + printk(KERN_CONT "%08lx ", c);
578 + }
579 + printk("\n");
580 +}
581 +#endif
582 +
583 /*
584 * First Level Translation Fault Handler
585 *
586 diff -urNp linux-3.0.4/arch/arm/mm/mmap.c linux-3.0.4/arch/arm/mm/mmap.c
587 --- linux-3.0.4/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588 +++ linux-3.0.4/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593 +#ifdef CONFIG_PAX_RANDMMAP
594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595 +#endif
596 +
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604 - if (TASK_SIZE - len >= addr &&
605 - (!vma || addr + len <= vma->vm_start))
606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610 - start_addr = addr = mm->free_area_cache;
611 + start_addr = addr = mm->free_area_cache;
612 } else {
613 - start_addr = addr = TASK_UNMAPPED_BASE;
614 - mm->cached_hole_size = 0;
615 + start_addr = addr = mm->mmap_base;
616 + mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624 - if (start_addr != TASK_UNMAPPED_BASE) {
625 - start_addr = addr = TASK_UNMAPPED_BASE;
626 + if (start_addr != mm->mmap_base) {
627 + start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633 - if (!vma || addr + len <= vma->vm_start) {
634 + if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638 diff -urNp linux-3.0.4/arch/avr32/include/asm/elf.h linux-3.0.4/arch/avr32/include/asm/elf.h
639 --- linux-3.0.4/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640 +++ linux-3.0.4/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN 15
652 +#define PAX_DELTA_STACK_LEN 15
653 +#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657 diff -urNp linux-3.0.4/arch/avr32/include/asm/kmap_types.h linux-3.0.4/arch/avr32/include/asm/kmap_types.h
658 --- linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659 +++ linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664 -D(14) KM_TYPE_NR
665 +D(14) KM_CLEARPAGE,
666 +D(15) KM_TYPE_NR
667 };
668
669 #undef D
670 diff -urNp linux-3.0.4/arch/avr32/mm/fault.c linux-3.0.4/arch/avr32/mm/fault.c
671 --- linux-3.0.4/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672 +++ linux-3.0.4/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
679 +{
680 + unsigned long i;
681 +
682 + printk(KERN_ERR "PAX: bytes at PC: ");
683 + for (i = 0; i < 20; i++) {
684 + unsigned char c;
685 + if (get_user(c, (unsigned char *)pc+i))
686 + printk(KERN_CONT "???????? ");
687 + else
688 + printk(KERN_CONT "%02x ", c);
689 + }
690 + printk("\n");
691 +}
692 +#endif
693 +
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701 +
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 + do_group_exit(SIGKILL);
707 + }
708 + }
709 +#endif
710 +
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714 diff -urNp linux-3.0.4/arch/frv/include/asm/kmap_types.h linux-3.0.4/arch/frv/include/asm/kmap_types.h
715 --- linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716 +++ linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721 + KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725 diff -urNp linux-3.0.4/arch/frv/mm/elf-fdpic.c linux-3.0.4/arch/frv/mm/elf-fdpic.c
726 --- linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727 +++ linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732 - if (TASK_SIZE - len >= addr &&
733 - (!vma || addr + len <= vma->vm_start))
734 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751 - if (addr + len <= vma->vm_start)
752 + if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756 diff -urNp linux-3.0.4/arch/ia64/include/asm/elf.h linux-3.0.4/arch/ia64/include/asm/elf.h
757 --- linux-3.0.4/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758 +++ linux-3.0.4/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759 @@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765 +
766 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768 +#endif
769 +
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773 diff -urNp linux-3.0.4/arch/ia64/include/asm/pgtable.h linux-3.0.4/arch/ia64/include/asm/pgtable.h
774 --- linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775 +++ linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776 @@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780 -
781 +#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785 @@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789 +
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794 +#else
795 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC PAGE_COPY
798 +#endif
799 +
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-3.0.4/arch/ia64/include/asm/spinlock.h linux-3.0.4/arch/ia64/include/asm/spinlock.h
804 --- linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805 +++ linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815 diff -urNp linux-3.0.4/arch/ia64/include/asm/uaccess.h linux-3.0.4/arch/ia64/include/asm/uaccess.h
816 --- linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817 +++ linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
823 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
832 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836 diff -urNp linux-3.0.4/arch/ia64/kernel/module.c linux-3.0.4/arch/ia64/kernel/module.c
837 --- linux-3.0.4/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838 +++ linux-3.0.4/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843 - if (mod && mod->arch.init_unw_table &&
844 - module_region == mod->module_init) {
845 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853 +in_init_rx (const struct module *mod, uint64_t addr)
854 +{
855 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856 +}
857 +
858 +static inline int
859 +in_init_rw (const struct module *mod, uint64_t addr)
860 +{
861 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862 +}
863 +
864 +static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867 - return addr - (uint64_t) mod->module_init < mod->init_size;
868 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869 +}
870 +
871 +static inline int
872 +in_core_rx (const struct module *mod, uint64_t addr)
873 +{
874 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875 +}
876 +
877 +static inline int
878 +in_core_rw (const struct module *mod, uint64_t addr)
879 +{
880 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886 - return addr - (uint64_t) mod->module_core < mod->core_size;
887 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 + if (in_init_rx(mod, val))
897 + val -= (uint64_t) mod->module_init_rx;
898 + else if (in_init_rw(mod, val))
899 + val -= (uint64_t) mod->module_init_rw;
900 + else if (in_core_rx(mod, val))
901 + val -= (uint64_t) mod->module_core_rx;
902 + else if (in_core_rw(mod, val))
903 + val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911 - if (mod->core_size > MAX_LTOFF)
912 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917 - gp = mod->core_size - MAX_LTOFF / 2;
918 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920 - gp = mod->core_size / 2;
921 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927 diff -urNp linux-3.0.4/arch/ia64/kernel/sys_ia64.c linux-3.0.4/arch/ia64/kernel/sys_ia64.c
928 --- linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929 +++ linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934 +
935 +#ifdef CONFIG_PAX_RANDMMAP
936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
937 + addr = mm->free_area_cache;
938 + else
939 +#endif
940 +
941 if (!addr)
942 addr = mm->free_area_cache;
943
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 - if (start_addr != TASK_UNMAPPED_BASE) {
949 + if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951 - addr = TASK_UNMAPPED_BASE;
952 + addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957 - if (!vma || addr + len <= vma->vm_start) {
958 + if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962 diff -urNp linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964 +++ linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 - __phys_per_cpu_start = __per_cpu_load;
970 + __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974 diff -urNp linux-3.0.4/arch/ia64/mm/fault.c linux-3.0.4/arch/ia64/mm/fault.c
975 --- linux-3.0.4/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976 +++ linux-3.0.4/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
983 +{
984 + unsigned long i;
985 +
986 + printk(KERN_ERR "PAX: bytes at PC: ");
987 + for (i = 0; i < 8; i++) {
988 + unsigned int c;
989 + if (get_user(c, (unsigned int *)pc+i))
990 + printk(KERN_CONT "???????? ");
991 + else
992 + printk(KERN_CONT "%08x ", c);
993 + }
994 + printk("\n");
995 +}
996 +#endif
997 +
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005 - if ((vma->vm_flags & mask) != mask)
1006 + if ((vma->vm_flags & mask) != mask) {
1007 +
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011 + goto bad_area;
1012 +
1013 + up_read(&mm->mmap_sem);
1014 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 + do_group_exit(SIGKILL);
1016 + }
1017 +#endif
1018 +
1019 goto bad_area;
1020
1021 + }
1022 +
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-3.0.4/arch/ia64/mm/hugetlbpage.c linux-3.0.4/arch/ia64/mm/hugetlbpage.c
1027 --- linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028 +++ linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033 - if (!vmm || (addr + len) <= vmm->vm_start)
1034 + if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038 diff -urNp linux-3.0.4/arch/ia64/mm/init.c linux-3.0.4/arch/ia64/mm/init.c
1039 --- linux-3.0.4/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040 +++ linux-3.0.4/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045 +
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 + vma->vm_flags &= ~VM_EXEC;
1049 +
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 + vma->vm_flags &= ~VM_MAYEXEC;
1053 +#endif
1054 +
1055 + }
1056 +#endif
1057 +
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-3.0.4/arch/m32r/lib/usercopy.c linux-3.0.4/arch/m32r/lib/usercopy.c
1062 --- linux-3.0.4/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063 +++ linux-3.0.4/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064 @@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068 + if ((long)n < 0)
1069 + return n;
1070 +
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078 + if ((long)n < 0)
1079 + return n;
1080 +
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-3.0.4/arch/mips/include/asm/elf.h linux-3.0.4/arch/mips/include/asm/elf.h
1085 --- linux-3.0.4/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086 +++ linux-3.0.4/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093 +
1094 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096 +#endif
1097 +
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103 -struct mm_struct;
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1106 -
1107 #endif /* _ASM_ELF_H */
1108 diff -urNp linux-3.0.4/arch/mips/include/asm/page.h linux-3.0.4/arch/mips/include/asm/page.h
1109 --- linux-3.0.4/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110 +++ linux-3.0.4/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120 diff -urNp linux-3.0.4/arch/mips/include/asm/system.h linux-3.0.4/arch/mips/include/asm/system.h
1121 --- linux-3.0.4/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122 +++ linux-3.0.4/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131 diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133 +++ linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140 +
1141 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143 +#endif
1144 +
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148 diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150 +++ linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157 +
1158 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160 +#endif
1161 +
1162 #include <asm/processor.h>
1163
1164 /*
1165 diff -urNp linux-3.0.4/arch/mips/kernel/process.c linux-3.0.4/arch/mips/kernel/process.c
1166 --- linux-3.0.4/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167 +++ linux-3.0.4/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172 -
1173 -/*
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176 - */
1177 -unsigned long arch_align_stack(unsigned long sp)
1178 -{
1179 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 - sp -= get_random_int() & ~PAGE_MASK;
1181 -
1182 - return sp & ALMASK;
1183 -}
1184 diff -urNp linux-3.0.4/arch/mips/mm/fault.c linux-3.0.4/arch/mips/mm/fault.c
1185 --- linux-3.0.4/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186 +++ linux-3.0.4/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187 @@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191 +#ifdef CONFIG_PAX_PAGEEXEC
1192 +void pax_report_insns(void *pc, void *sp)
1193 +{
1194 + unsigned long i;
1195 +
1196 + printk(KERN_ERR "PAX: bytes at PC: ");
1197 + for (i = 0; i < 5; i++) {
1198 + unsigned int c;
1199 + if (get_user(c, (unsigned int *)pc+i))
1200 + printk(KERN_CONT "???????? ");
1201 + else
1202 + printk(KERN_CONT "%08x ", c);
1203 + }
1204 + printk("\n");
1205 +}
1206 +#endif
1207 +
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211 diff -urNp linux-3.0.4/arch/mips/mm/mmap.c linux-3.0.4/arch/mips/mm/mmap.c
1212 --- linux-3.0.4/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213 +++ linux-3.0.4/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214 @@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218 +
1219 +#ifdef CONFIG_PAX_RANDMMAP
1220 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221 +#endif
1222 +
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229 - if (TASK_SIZE - len >= addr &&
1230 - (!vmm || addr + len <= vmm->vm_start))
1231 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235 @@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239 - if (!vmm || addr + len <= vmm->vm_start)
1240 + if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244 @@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248 -
1249 -static inline unsigned long brk_rnd(void)
1250 -{
1251 - unsigned long rnd = get_random_int();
1252 -
1253 - rnd = rnd << PAGE_SHIFT;
1254 - /* 8MB for 32bit, 256MB for 64bit */
1255 - if (TASK_IS_32BIT_ADDR)
1256 - rnd = rnd & 0x7ffffful;
1257 - else
1258 - rnd = rnd & 0xffffffful;
1259 -
1260 - return rnd;
1261 -}
1262 -
1263 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1264 -{
1265 - unsigned long base = mm->brk;
1266 - unsigned long ret;
1267 -
1268 - ret = PAGE_ALIGN(base + brk_rnd());
1269 -
1270 - if (ret < mm->brk)
1271 - return mm->brk;
1272 -
1273 - return ret;
1274 -}
1275 diff -urNp linux-3.0.4/arch/parisc/include/asm/elf.h linux-3.0.4/arch/parisc/include/asm/elf.h
1276 --- linux-3.0.4/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277 +++ linux-3.0.4/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282 +#ifdef CONFIG_PAX_ASLR
1283 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284 +
1285 +#define PAX_DELTA_MMAP_LEN 16
1286 +#define PAX_DELTA_STACK_LEN 16
1287 +#endif
1288 +
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292 diff -urNp linux-3.0.4/arch/parisc/include/asm/pgtable.h linux-3.0.4/arch/parisc/include/asm/pgtable.h
1293 --- linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294 +++ linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295 @@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299 +
1300 +#ifdef CONFIG_PAX_PAGEEXEC
1301 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304 +#else
1305 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306 +# define PAGE_COPY_NOEXEC PAGE_COPY
1307 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308 +#endif
1309 +
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313 diff -urNp linux-3.0.4/arch/parisc/kernel/module.c linux-3.0.4/arch/parisc/kernel/module.c
1314 --- linux-3.0.4/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315 +++ linux-3.0.4/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316 @@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320 +static inline int in_init_rx(struct module *me, void *loc)
1321 +{
1322 + return (loc >= me->module_init_rx &&
1323 + loc < (me->module_init_rx + me->init_size_rx));
1324 +}
1325 +
1326 +static inline int in_init_rw(struct module *me, void *loc)
1327 +{
1328 + return (loc >= me->module_init_rw &&
1329 + loc < (me->module_init_rw + me->init_size_rw));
1330 +}
1331 +
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334 - return (loc >= me->module_init &&
1335 - loc <= (me->module_init + me->init_size));
1336 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1337 +}
1338 +
1339 +static inline int in_core_rx(struct module *me, void *loc)
1340 +{
1341 + return (loc >= me->module_core_rx &&
1342 + loc < (me->module_core_rx + me->core_size_rx));
1343 +}
1344 +
1345 +static inline int in_core_rw(struct module *me, void *loc)
1346 +{
1347 + return (loc >= me->module_core_rw &&
1348 + loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353 - return (loc >= me->module_core &&
1354 - loc <= (me->module_core + me->core_size));
1355 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363 - me->core_size = ALIGN(me->core_size, 16);
1364 - me->arch.got_offset = me->core_size;
1365 - me->core_size += gots * sizeof(struct got_entry);
1366 -
1367 - me->core_size = ALIGN(me->core_size, 16);
1368 - me->arch.fdesc_offset = me->core_size;
1369 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1370 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371 + me->arch.got_offset = me->core_size_rw;
1372 + me->core_size_rw += gots * sizeof(struct got_entry);
1373 +
1374 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375 + me->arch.fdesc_offset = me->core_size_rw;
1376 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384 - got = me->module_core + me->arch.got_offset;
1385 + got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407 @@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416 diff -urNp linux-3.0.4/arch/parisc/kernel/sys_parisc.c linux-3.0.4/arch/parisc/kernel/sys_parisc.c
1417 --- linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418 +++ linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423 - if (!vma || addr + len <= vma->vm_start)
1424 + if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432 - if (!vma || addr + len <= vma->vm_start)
1433 + if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441 - addr = TASK_UNMAPPED_BASE;
1442 + addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446 diff -urNp linux-3.0.4/arch/parisc/kernel/traps.c linux-3.0.4/arch/parisc/kernel/traps.c
1447 --- linux-3.0.4/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448 +++ linux-3.0.4/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1454 - && (vma->vm_flags & VM_EXEC)) {
1455 -
1456 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460 diff -urNp linux-3.0.4/arch/parisc/mm/fault.c linux-3.0.4/arch/parisc/mm/fault.c
1461 --- linux-3.0.4/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462 +++ linux-3.0.4/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463 @@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467 +#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475 - if (code == 6 || code == 16)
1476 + if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484 +#ifdef CONFIG_PAX_PAGEEXEC
1485 +/*
1486 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487 + *
1488 + * returns 1 when task should be killed
1489 + * 2 when rt_sigreturn trampoline was detected
1490 + * 3 when unpatched PLT trampoline was detected
1491 + */
1492 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1493 +{
1494 +
1495 +#ifdef CONFIG_PAX_EMUPLT
1496 + int err;
1497 +
1498 + do { /* PaX: unpatched PLT emulation */
1499 + unsigned int bl, depwi;
1500 +
1501 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503 +
1504 + if (err)
1505 + break;
1506 +
1507 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509 +
1510 + err = get_user(ldw, (unsigned int *)addr);
1511 + err |= get_user(bv, (unsigned int *)(addr+4));
1512 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1513 +
1514 + if (err)
1515 + break;
1516 +
1517 + if (ldw == 0x0E801096U &&
1518 + bv == 0xEAC0C000U &&
1519 + ldw2 == 0x0E881095U)
1520 + {
1521 + unsigned int resolver, map;
1522 +
1523 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525 + if (err)
1526 + break;
1527 +
1528 + regs->gr[20] = instruction_pointer(regs)+8;
1529 + regs->gr[21] = map;
1530 + regs->gr[22] = resolver;
1531 + regs->iaoq[0] = resolver | 3UL;
1532 + regs->iaoq[1] = regs->iaoq[0] + 4;
1533 + return 3;
1534 + }
1535 + }
1536 + } while (0);
1537 +#endif
1538 +
1539 +#ifdef CONFIG_PAX_EMUTRAMP
1540 +
1541 +#ifndef CONFIG_PAX_EMUSIGRT
1542 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543 + return 1;
1544 +#endif
1545 +
1546 + do { /* PaX: rt_sigreturn emulation */
1547 + unsigned int ldi1, ldi2, bel, nop;
1548 +
1549 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553 +
1554 + if (err)
1555 + break;
1556 +
1557 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558 + ldi2 == 0x3414015AU &&
1559 + bel == 0xE4008200U &&
1560 + nop == 0x08000240U)
1561 + {
1562 + regs->gr[25] = (ldi1 & 2) >> 1;
1563 + regs->gr[20] = __NR_rt_sigreturn;
1564 + regs->gr[31] = regs->iaoq[1] + 16;
1565 + regs->sr[0] = regs->iasq[1];
1566 + regs->iaoq[0] = 0x100UL;
1567 + regs->iaoq[1] = regs->iaoq[0] + 4;
1568 + regs->iasq[0] = regs->sr[2];
1569 + regs->iasq[1] = regs->sr[2];
1570 + return 2;
1571 + }
1572 + } while (0);
1573 +#endif
1574 +
1575 + return 1;
1576 +}
1577 +
1578 +void pax_report_insns(void *pc, void *sp)
1579 +{
1580 + unsigned long i;
1581 +
1582 + printk(KERN_ERR "PAX: bytes at PC: ");
1583 + for (i = 0; i < 5; i++) {
1584 + unsigned int c;
1585 + if (get_user(c, (unsigned int *)pc+i))
1586 + printk(KERN_CONT "???????? ");
1587 + else
1588 + printk(KERN_CONT "%08x ", c);
1589 + }
1590 + printk("\n");
1591 +}
1592 +#endif
1593 +
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597 @@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601 - if ((vma->vm_flags & acc_type) != acc_type)
1602 + if ((vma->vm_flags & acc_type) != acc_type) {
1603 +
1604 +#ifdef CONFIG_PAX_PAGEEXEC
1605 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606 + (address & ~3UL) == instruction_pointer(regs))
1607 + {
1608 + up_read(&mm->mmap_sem);
1609 + switch (pax_handle_fetch_fault(regs)) {
1610 +
1611 +#ifdef CONFIG_PAX_EMUPLT
1612 + case 3:
1613 + return;
1614 +#endif
1615 +
1616 +#ifdef CONFIG_PAX_EMUTRAMP
1617 + case 2:
1618 + return;
1619 +#endif
1620 +
1621 + }
1622 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623 + do_group_exit(SIGKILL);
1624 + }
1625 +#endif
1626 +
1627 goto bad_area;
1628 + }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632 diff -urNp linux-3.0.4/arch/powerpc/include/asm/elf.h linux-3.0.4/arch/powerpc/include/asm/elf.h
1633 --- linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634 +++ linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639 -extern unsigned long randomize_et_dyn(unsigned long base);
1640 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641 +#define ELF_ET_DYN_BASE (0x20000000)
1642 +
1643 +#ifdef CONFIG_PAX_ASLR
1644 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645 +
1646 +#ifdef __powerpc64__
1647 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649 +#else
1650 +#define PAX_DELTA_MMAP_LEN 15
1651 +#define PAX_DELTA_STACK_LEN 15
1652 +#endif
1653 +#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662 -#define arch_randomize_brk arch_randomize_brk
1663 -
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667 diff -urNp linux-3.0.4/arch/powerpc/include/asm/kmap_types.h linux-3.0.4/arch/powerpc/include/asm/kmap_types.h
1668 --- linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669 +++ linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670 @@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674 + KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678 diff -urNp linux-3.0.4/arch/powerpc/include/asm/mman.h linux-3.0.4/arch/powerpc/include/asm/mman.h
1679 --- linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680 +++ linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690 diff -urNp linux-3.0.4/arch/powerpc/include/asm/page_64.h linux-3.0.4/arch/powerpc/include/asm/page_64.h
1691 --- linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692 +++ linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693 @@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699 +#define VM_STACK_DEFAULT_FLAGS32 \
1700 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706 +#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710 +#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714 diff -urNp linux-3.0.4/arch/powerpc/include/asm/page.h linux-3.0.4/arch/powerpc/include/asm/page.h
1715 --- linux-3.0.4/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716 +++ linux-3.0.4/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723 +#define VM_DATA_DEFAULT_FLAGS32 \
1724 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733 +#define ktla_ktva(addr) (addr)
1734 +#define ktva_ktla(addr) (addr)
1735 +
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739 diff -urNp linux-3.0.4/arch/powerpc/include/asm/pgtable.h linux-3.0.4/arch/powerpc/include/asm/pgtable.h
1740 --- linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741 +++ linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742 @@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746 +#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750 diff -urNp linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h
1751 --- linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752 +++ linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753 @@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757 +#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761 diff -urNp linux-3.0.4/arch/powerpc/include/asm/reg.h linux-3.0.4/arch/powerpc/include/asm/reg.h
1762 --- linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763 +++ linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764 @@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772 diff -urNp linux-3.0.4/arch/powerpc/include/asm/system.h linux-3.0.4/arch/powerpc/include/asm/system.h
1773 --- linux-3.0.4/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774 +++ linux-3.0.4/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779 -extern unsigned long arch_align_stack(unsigned long sp);
1780 +#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784 diff -urNp linux-3.0.4/arch/powerpc/include/asm/uaccess.h linux-3.0.4/arch/powerpc/include/asm/uaccess.h
1785 --- linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786 +++ linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787 @@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792 +
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796 @@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800 -#ifndef __powerpc64__
1801 -
1802 -static inline unsigned long copy_from_user(void *to,
1803 - const void __user *from, unsigned long n)
1804 -{
1805 - unsigned long over;
1806 -
1807 - if (access_ok(VERIFY_READ, from, n))
1808 - return __copy_tofrom_user((__force void __user *)to, from, n);
1809 - if ((unsigned long)from < TASK_SIZE) {
1810 - over = (unsigned long)from + n - TASK_SIZE;
1811 - return __copy_tofrom_user((__force void __user *)to, from,
1812 - n - over) + over;
1813 - }
1814 - return n;
1815 -}
1816 -
1817 -static inline unsigned long copy_to_user(void __user *to,
1818 - const void *from, unsigned long n)
1819 -{
1820 - unsigned long over;
1821 -
1822 - if (access_ok(VERIFY_WRITE, to, n))
1823 - return __copy_tofrom_user(to, (__force void __user *)from, n);
1824 - if ((unsigned long)to < TASK_SIZE) {
1825 - over = (unsigned long)to + n - TASK_SIZE;
1826 - return __copy_tofrom_user(to, (__force void __user *)from,
1827 - n - over) + over;
1828 - }
1829 - return n;
1830 -}
1831 -
1832 -#else /* __powerpc64__ */
1833 -
1834 -#define __copy_in_user(to, from, size) \
1835 - __copy_tofrom_user((to), (from), (size))
1836 -
1837 -extern unsigned long copy_from_user(void *to, const void __user *from,
1838 - unsigned long n);
1839 -extern unsigned long copy_to_user(void __user *to, const void *from,
1840 - unsigned long n);
1841 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842 - unsigned long n);
1843 -
1844 -#endif /* __powerpc64__ */
1845 -
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853 +
1854 + if (!__builtin_constant_p(n))
1855 + check_object_size(to, n, false);
1856 +
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864 +
1865 + if (!__builtin_constant_p(n))
1866 + check_object_size(from, n, true);
1867 +
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875 +#ifndef __powerpc64__
1876 +
1877 +static inline unsigned long __must_check copy_from_user(void *to,
1878 + const void __user *from, unsigned long n)
1879 +{
1880 + unsigned long over;
1881 +
1882 + if ((long)n < 0)
1883 + return n;
1884 +
1885 + if (access_ok(VERIFY_READ, from, n)) {
1886 + if (!__builtin_constant_p(n))
1887 + check_object_size(to, n, false);
1888 + return __copy_tofrom_user((__force void __user *)to, from, n);
1889 + }
1890 + if ((unsigned long)from < TASK_SIZE) {
1891 + over = (unsigned long)from + n - TASK_SIZE;
1892 + if (!__builtin_constant_p(n - over))
1893 + check_object_size(to, n - over, false);
1894 + return __copy_tofrom_user((__force void __user *)to, from,
1895 + n - over) + over;
1896 + }
1897 + return n;
1898 +}
1899 +
1900 +static inline unsigned long __must_check copy_to_user(void __user *to,
1901 + const void *from, unsigned long n)
1902 +{
1903 + unsigned long over;
1904 +
1905 + if ((long)n < 0)
1906 + return n;
1907 +
1908 + if (access_ok(VERIFY_WRITE, to, n)) {
1909 + if (!__builtin_constant_p(n))
1910 + check_object_size(from, n, true);
1911 + return __copy_tofrom_user(to, (__force void __user *)from, n);
1912 + }
1913 + if ((unsigned long)to < TASK_SIZE) {
1914 + over = (unsigned long)to + n - TASK_SIZE;
1915 + if (!__builtin_constant_p(n))
1916 + check_object_size(from, n - over, true);
1917 + return __copy_tofrom_user(to, (__force void __user *)from,
1918 + n - over) + over;
1919 + }
1920 + return n;
1921 +}
1922 +
1923 +#else /* __powerpc64__ */
1924 +
1925 +#define __copy_in_user(to, from, size) \
1926 + __copy_tofrom_user((to), (from), (size))
1927 +
1928 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929 +{
1930 + if ((long)n < 0 || n > INT_MAX)
1931 + return n;
1932 +
1933 + if (!__builtin_constant_p(n))
1934 + check_object_size(to, n, false);
1935 +
1936 + if (likely(access_ok(VERIFY_READ, from, n)))
1937 + n = __copy_from_user(to, from, n);
1938 + else
1939 + memset(to, 0, n);
1940 + return n;
1941 +}
1942 +
1943 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944 +{
1945 + if ((long)n < 0 || n > INT_MAX)
1946 + return n;
1947 +
1948 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949 + if (!__builtin_constant_p(n))
1950 + check_object_size(from, n, true);
1951 + n = __copy_to_user(to, from, n);
1952 + }
1953 + return n;
1954 +}
1955 +
1956 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957 + unsigned long n);
1958 +
1959 +#endif /* __powerpc64__ */
1960 +
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964 diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S
1965 --- linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966 +++ linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967 @@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971 + bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975 @@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979 -1: bl .save_nvgprs
1980 - mr r5,r3
1981 +1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985 diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S
1986 --- linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987 +++ linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988 @@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992 + bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996 - bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000 diff -urNp linux-3.0.4/arch/powerpc/kernel/module_32.c linux-3.0.4/arch/powerpc/kernel/module_32.c
2001 --- linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002 +++ linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2008 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016 - if (location >= mod->module_core
2017 - && location < mod->module_core + mod->core_size)
2018 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021 - else
2022 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025 + else {
2026 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027 + return ~0UL;
2028 + }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032 diff -urNp linux-3.0.4/arch/powerpc/kernel/module.c linux-3.0.4/arch/powerpc/kernel/module.c
2033 --- linux-3.0.4/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034 +++ linux-3.0.4/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035 @@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039 +#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045 + return vmalloc(size);
2046 +}
2047 +
2048 +void *module_alloc_exec(unsigned long size)
2049 +#else
2050 +void *module_alloc(unsigned long size)
2051 +#endif
2052 +
2053 +{
2054 + if (size == 0)
2055 + return NULL;
2056 +
2057 return vmalloc_exec(size);
2058 }
2059
2060 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064 +#ifdef CONFIG_PAX_KERNEXEC
2065 +void module_free_exec(struct module *mod, void *module_region)
2066 +{
2067 + module_free(mod, module_region);
2068 +}
2069 +#endif
2070 +
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074 diff -urNp linux-3.0.4/arch/powerpc/kernel/process.c linux-3.0.4/arch/powerpc/kernel/process.c
2075 --- linux-3.0.4/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076 +++ linux-3.0.4/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088 @@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096 - printk(" (%pS)",
2097 + printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101 @@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110 @@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114 -
2115 -unsigned long arch_align_stack(unsigned long sp)
2116 -{
2117 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118 - sp -= get_random_int() & ~PAGE_MASK;
2119 - return sp & ~0xf;
2120 -}
2121 -
2122 -static inline unsigned long brk_rnd(void)
2123 -{
2124 - unsigned long rnd = 0;
2125 -
2126 - /* 8MB for 32bit, 1GB for 64bit */
2127 - if (is_32bit_task())
2128 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129 - else
2130 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131 -
2132 - return rnd << PAGE_SHIFT;
2133 -}
2134 -
2135 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2136 -{
2137 - unsigned long base = mm->brk;
2138 - unsigned long ret;
2139 -
2140 -#ifdef CONFIG_PPC_STD_MMU_64
2141 - /*
2142 - * If we are using 1TB segments and we are allowed to randomise
2143 - * the heap, we can put it above 1TB so it is backed by a 1TB
2144 - * segment. Otherwise the heap will be in the bottom 1TB
2145 - * which always uses 256MB segments and this may result in a
2146 - * performance penalty.
2147 - */
2148 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150 -#endif
2151 -
2152 - ret = PAGE_ALIGN(base + brk_rnd());
2153 -
2154 - if (ret < mm->brk)
2155 - return mm->brk;
2156 -
2157 - return ret;
2158 -}
2159 -
2160 -unsigned long randomize_et_dyn(unsigned long base)
2161 -{
2162 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163 -
2164 - if (ret < base)
2165 - return base;
2166 -
2167 - return ret;
2168 -}
2169 diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_32.c linux-3.0.4/arch/powerpc/kernel/signal_32.c
2170 --- linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171 +++ linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181 diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_64.c linux-3.0.4/arch/powerpc/kernel/signal_64.c
2182 --- linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183 +++ linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193 diff -urNp linux-3.0.4/arch/powerpc/kernel/traps.c linux-3.0.4/arch/powerpc/kernel/traps.c
2194 --- linux-3.0.4/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195 +++ linux-3.0.4/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200 +extern void gr_handle_kernel_exploit(void);
2201 +
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209 + gr_handle_kernel_exploit();
2210 +
2211 oops_exit();
2212 do_exit(err);
2213
2214 diff -urNp linux-3.0.4/arch/powerpc/kernel/vdso.c linux-3.0.4/arch/powerpc/kernel/vdso.c
2215 --- linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216 +++ linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217 @@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221 +#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229 - current->mm->context.vdso_base = 0;
2230 + current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238 - 0, 0);
2239 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243 diff -urNp linux-3.0.4/arch/powerpc/lib/usercopy_64.c linux-3.0.4/arch/powerpc/lib/usercopy_64.c
2244 --- linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245 +++ linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246 @@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251 -{
2252 - if (likely(access_ok(VERIFY_READ, from, n)))
2253 - n = __copy_from_user(to, from, n);
2254 - else
2255 - memset(to, 0, n);
2256 - return n;
2257 -}
2258 -
2259 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260 -{
2261 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2262 - n = __copy_to_user(to, from, n);
2263 - return n;
2264 -}
2265 -
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273 -EXPORT_SYMBOL(copy_from_user);
2274 -EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277 diff -urNp linux-3.0.4/arch/powerpc/mm/fault.c linux-3.0.4/arch/powerpc/mm/fault.c
2278 --- linux-3.0.4/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279 +++ linux-3.0.4/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280 @@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284 +#include <linux/slab.h>
2285 +#include <linux/pagemap.h>
2286 +#include <linux/compiler.h>
2287 +#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291 @@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295 +#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299 @@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303 +#ifdef CONFIG_PAX_PAGEEXEC
2304 +/*
2305 + * PaX: decide what to do with offenders (regs->nip = fault address)
2306 + *
2307 + * returns 1 when task should be killed
2308 + */
2309 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2310 +{
2311 + return 1;
2312 +}
2313 +
2314 +void pax_report_insns(void *pc, void *sp)
2315 +{
2316 + unsigned long i;
2317 +
2318 + printk(KERN_ERR "PAX: bytes at PC: ");
2319 + for (i = 0; i < 5; i++) {
2320 + unsigned int c;
2321 + if (get_user(c, (unsigned int __user *)pc+i))
2322 + printk(KERN_CONT "???????? ");
2323 + else
2324 + printk(KERN_CONT "%08x ", c);
2325 + }
2326 + printk("\n");
2327 +}
2328 +#endif
2329 +
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337 - error_code &= 0x48200000;
2338 + error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342 @@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346 - if (error_code & 0x10000000)
2347 + if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351 @@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355 - if (error_code & DSISR_PROTFAULT)
2356 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360 @@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364 +
2365 +#ifdef CONFIG_PAX_PAGEEXEC
2366 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367 +#ifdef CONFIG_PPC_STD_MMU
2368 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369 +#else
2370 + if (is_exec && regs->nip == address) {
2371 +#endif
2372 + switch (pax_handle_fetch_fault(regs)) {
2373 + }
2374 +
2375 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376 + do_group_exit(SIGKILL);
2377 + }
2378 + }
2379 +#endif
2380 +
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384 diff -urNp linux-3.0.4/arch/powerpc/mm/mmap_64.c linux-3.0.4/arch/powerpc/mm/mmap_64.c
2385 --- linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386 +++ linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391 +
2392 +#ifdef CONFIG_PAX_RANDMMAP
2393 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2394 + mm->mmap_base += mm->delta_mmap;
2395 +#endif
2396 +
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401 +
2402 +#ifdef CONFIG_PAX_RANDMMAP
2403 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2404 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405 +#endif
2406 +
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410 diff -urNp linux-3.0.4/arch/powerpc/mm/slice.c linux-3.0.4/arch/powerpc/mm/slice.c
2411 --- linux-3.0.4/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412 +++ linux-3.0.4/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417 - return (!vma || (addr + len) <= vma->vm_start);
2418 + return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422 @@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426 - if (!vma || addr + len <= vma->vm_start) {
2427 + if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435 - addr = mm->mmap_base;
2436 - while (addr > len) {
2437 + if (mm->mmap_base < len)
2438 + addr = -ENOMEM;
2439 + else
2440 + addr = mm->mmap_base - len;
2441 +
2442 + while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453 - if (!vma || (addr + len) <= vma->vm_start) {
2454 + if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462 - addr = vma->vm_start;
2463 + addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471 +#ifdef CONFIG_PAX_RANDMMAP
2472 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473 + addr = 0;
2474 +#endif
2475 +
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479 diff -urNp linux-3.0.4/arch/s390/include/asm/elf.h linux-3.0.4/arch/s390/include/asm/elf.h
2480 --- linux-3.0.4/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481 +++ linux-3.0.4/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486 -extern unsigned long randomize_et_dyn(unsigned long base);
2487 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489 +
2490 +#ifdef CONFIG_PAX_ASLR
2491 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492 +
2493 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495 +#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499 @@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504 -#define arch_randomize_brk arch_randomize_brk
2505 -
2506 #endif
2507 diff -urNp linux-3.0.4/arch/s390/include/asm/system.h linux-3.0.4/arch/s390/include/asm/system.h
2508 --- linux-3.0.4/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509 +++ linux-3.0.4/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514 -extern unsigned long arch_align_stack(unsigned long sp);
2515 +#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519 diff -urNp linux-3.0.4/arch/s390/include/asm/uaccess.h linux-3.0.4/arch/s390/include/asm/uaccess.h
2520 --- linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521 +++ linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526 +
2527 + if ((long)n < 0)
2528 + return n;
2529 +
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537 + if ((long)n < 0)
2538 + return n;
2539 +
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547 +
2548 + if ((long)n < 0)
2549 + return n;
2550 +
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554 diff -urNp linux-3.0.4/arch/s390/kernel/module.c linux-3.0.4/arch/s390/kernel/module.c
2555 --- linux-3.0.4/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556 +++ linux-3.0.4/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561 - me->core_size = ALIGN(me->core_size, 4);
2562 - me->arch.got_offset = me->core_size;
2563 - me->core_size += me->arch.got_size;
2564 - me->arch.plt_offset = me->core_size;
2565 - me->core_size += me->arch.plt_size;
2566 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567 + me->arch.got_offset = me->core_size_rw;
2568 + me->core_size_rw += me->arch.got_size;
2569 + me->arch.plt_offset = me->core_size_rx;
2570 + me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578 - gotent = me->module_core + me->arch.got_offset +
2579 + gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587 - (val + (Elf_Addr) me->module_core - loc) >> 1;
2588 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596 - ip = me->module_core + me->arch.plt_offset +
2597 + ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605 - val = (Elf_Addr) me->module_core +
2606 + val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614 - ((Elf_Addr) me->module_core + me->arch.got_offset);
2615 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628 diff -urNp linux-3.0.4/arch/s390/kernel/process.c linux-3.0.4/arch/s390/kernel/process.c
2629 --- linux-3.0.4/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630 +++ linux-3.0.4/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631 @@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635 -
2636 -unsigned long arch_align_stack(unsigned long sp)
2637 -{
2638 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639 - sp -= get_random_int() & ~PAGE_MASK;
2640 - return sp & ~0xf;
2641 -}
2642 -
2643 -static inline unsigned long brk_rnd(void)
2644 -{
2645 - /* 8MB for 32bit, 1GB for 64bit */
2646 - if (is_32bit_task())
2647 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648 - else
2649 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650 -}
2651 -
2652 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2653 -{
2654 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655 -
2656 - if (ret < mm->brk)
2657 - return mm->brk;
2658 - return ret;
2659 -}
2660 -
2661 -unsigned long randomize_et_dyn(unsigned long base)
2662 -{
2663 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664 -
2665 - if (!(current->flags & PF_RANDOMIZE))
2666 - return base;
2667 - if (ret < base)
2668 - return base;
2669 - return ret;
2670 -}
2671 diff -urNp linux-3.0.4/arch/s390/kernel/setup.c linux-3.0.4/arch/s390/kernel/setup.c
2672 --- linux-3.0.4/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673 +++ linux-3.0.4/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678 -unsigned int user_mode = HOME_SPACE_MODE;
2679 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683 diff -urNp linux-3.0.4/arch/s390/mm/mmap.c linux-3.0.4/arch/s390/mm/mmap.c
2684 --- linux-3.0.4/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685 +++ linux-3.0.4/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690 +
2691 +#ifdef CONFIG_PAX_RANDMMAP
2692 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2693 + mm->mmap_base += mm->delta_mmap;
2694 +#endif
2695 +
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700 +
2701 +#ifdef CONFIG_PAX_RANDMMAP
2702 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2703 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704 +#endif
2705 +
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713 +
2714 +#ifdef CONFIG_PAX_RANDMMAP
2715 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2716 + mm->mmap_base += mm->delta_mmap;
2717 +#endif
2718 +
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723 +
2724 +#ifdef CONFIG_PAX_RANDMMAP
2725 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2726 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727 +#endif
2728 +
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732 diff -urNp linux-3.0.4/arch/score/include/asm/system.h linux-3.0.4/arch/score/include/asm/system.h
2733 --- linux-3.0.4/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734 +++ linux-3.0.4/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735 @@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739 -extern unsigned long arch_align_stack(unsigned long sp);
2740 +#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744 diff -urNp linux-3.0.4/arch/score/kernel/process.c linux-3.0.4/arch/score/kernel/process.c
2745 --- linux-3.0.4/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746 +++ linux-3.0.4/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751 -
2752 -unsigned long arch_align_stack(unsigned long sp)
2753 -{
2754 - return sp;
2755 -}
2756 diff -urNp linux-3.0.4/arch/sh/mm/mmap.c linux-3.0.4/arch/sh/mm/mmap.c
2757 --- linux-3.0.4/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758 +++ linux-3.0.4/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763 - if (TASK_SIZE - len >= addr &&
2764 - (!vma || addr + len <= vma->vm_start))
2765 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769 @@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773 - if (likely(!vma || addr + len <= vma->vm_start)) {
2774 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782 - if (TASK_SIZE - len >= addr &&
2783 - (!vma || addr + len <= vma->vm_start))
2784 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792 - if (!vma || addr <= vma->vm_start) {
2793 + if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801 - addr = mm->mmap_base-len;
2802 - if (do_colour_align)
2803 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804 + addr = mm->mmap_base - len;
2805
2806 do {
2807 + if (do_colour_align)
2808 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815 - if (likely(!vma || addr+len <= vma->vm_start)) {
2816 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824 - addr = vma->vm_start-len;
2825 - if (do_colour_align)
2826 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827 - } while (likely(len < vma->vm_start));
2828 + addr = skip_heap_stack_gap(vma, len);
2829 + } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833 diff -urNp linux-3.0.4/arch/sparc/include/asm/atomic_64.h linux-3.0.4/arch/sparc/include/asm/atomic_64.h
2834 --- linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835 +++ linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836 @@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841 +{
2842 + return v->counter;
2843 +}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846 +{
2847 + return v->counter;
2848 +}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852 +{
2853 + v->counter = i;
2854 +}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857 +{
2858 + v->counter = i;
2859 +}
2860
2861 extern void atomic_add(int, atomic_t *);
2862 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882 +{
2883 + return atomic_add_ret_unchecked(1, v);
2884 +}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887 +{
2888 + return atomic64_add_ret_unchecked(1, v);
2889 +}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896 +{
2897 + return atomic_add_ret_unchecked(i, v);
2898 +}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901 +{
2902 + return atomic64_add_ret_unchecked(i, v);
2903 +}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912 +{
2913 + return atomic_inc_return_unchecked(v) == 0;
2914 +}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923 +{
2924 + atomic_add_unchecked(1, v);
2925 +}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928 +{
2929 + atomic64_add_unchecked(1, v);
2930 +}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934 +{
2935 + atomic_sub_unchecked(1, v);
2936 +}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939 +{
2940 + atomic64_sub_unchecked(1, v);
2941 +}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948 +{
2949 + return cmpxchg(&v->counter, old, new);
2950 +}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953 +{
2954 + return xchg(&v->counter, new);
2955 +}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959 - int c, old;
2960 + int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963 - if (unlikely(c == (u)))
2964 + if (unlikely(c == u))
2965 break;
2966 - old = atomic_cmpxchg((v), c, c + (a));
2967 +
2968 + asm volatile("addcc %2, %0, %0\n"
2969 +
2970 +#ifdef CONFIG_PAX_REFCOUNT
2971 + "tvs %%icc, 6\n"
2972 +#endif
2973 +
2974 + : "=r" (new)
2975 + : "0" (c), "ir" (a)
2976 + : "cc");
2977 +
2978 + old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983 - return c != (u);
2984 + return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993 +{
2994 + return xchg(&v->counter, new);
2995 +}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999 - long c, old;
3000 + long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003 - if (unlikely(c == (u)))
3004 + if (unlikely(c == u))
3005 break;
3006 - old = atomic64_cmpxchg((v), c, c + (a));
3007 +
3008 + asm volatile("addcc %2, %0, %0\n"
3009 +
3010 +#ifdef CONFIG_PAX_REFCOUNT
3011 + "tvs %%xcc, 6\n"
3012 +#endif
3013 +
3014 + : "=r" (new)
3015 + : "0" (c), "ir" (a)
3016 + : "cc");
3017 +
3018 + old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023 - return c != (u);
3024 + return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028 diff -urNp linux-3.0.4/arch/sparc/include/asm/cache.h linux-3.0.4/arch/sparc/include/asm/cache.h
3029 --- linux-3.0.4/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030 +++ linux-3.0.4/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031 @@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035 -#define L1_CACHE_BYTES 32
3036 +#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040 diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_32.h linux-3.0.4/arch/sparc/include/asm/elf_32.h
3041 --- linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042 +++ linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043 @@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047 +#ifdef CONFIG_PAX_ASLR
3048 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049 +
3050 +#define PAX_DELTA_MMAP_LEN 16
3051 +#define PAX_DELTA_STACK_LEN 16
3052 +#endif
3053 +
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057 diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_64.h linux-3.0.4/arch/sparc/include/asm/elf_64.h
3058 --- linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-08-23 21:44:40.000000000 -0400
3059 +++ linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060 @@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064 +#ifdef CONFIG_PAX_ASLR
3065 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066 +
3067 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069 +#endif
3070 +
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074 diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtable_32.h linux-3.0.4/arch/sparc/include/asm/pgtable_32.h
3075 --- linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076 +++ linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081 +
3082 +#ifdef CONFIG_PAX_PAGEEXEC
3083 +BTFIXUPDEF_INT(page_shared_noexec)
3084 +BTFIXUPDEF_INT(page_copy_noexec)
3085 +BTFIXUPDEF_INT(page_readonly_noexec)
3086 +#endif
3087 +
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095 +#ifdef CONFIG_PAX_PAGEEXEC
3096 +extern pgprot_t PAGE_SHARED_NOEXEC;
3097 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099 +#else
3100 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101 +# define PAGE_COPY_NOEXEC PAGE_COPY
3102 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103 +#endif
3104 +
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108 diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h
3109 --- linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110 +++ linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111 @@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115 +
3116 +#ifdef CONFIG_PAX_PAGEEXEC
3117 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120 +#endif
3121 +
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125 diff -urNp linux-3.0.4/arch/sparc/include/asm/spinlock_64.h linux-3.0.4/arch/sparc/include/asm/spinlock_64.h
3126 --- linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-07-21 22:17:23.000000000 -0400
3127 +++ linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-08-23 21:47:55.000000000 -0400
3128 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132 -static void inline arch_read_lock(arch_rwlock_t *lock)
3133 +static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140 -"4: add %0, 1, %1\n"
3141 +"4: addcc %0, 1, %1\n"
3142 +
3143 +#ifdef CONFIG_PAX_REFCOUNT
3144 +" tvs %%icc, 6\n"
3145 +#endif
3146 +
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154 - : "memory");
3155 + : "memory", "cc");
3156 }
3157
3158 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3159 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167 -" add %0, 1, %1\n"
3168 +" addcc %0, 1, %1\n"
3169 +
3170 +#ifdef CONFIG_PAX_REFCOUNT
3171 +" tvs %%icc, 6\n"
3172 +#endif
3173 +
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3182 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188 -" sub %0, 1, %1\n"
3189 +" subcc %0, 1, %1\n"
3190 +
3191 +#ifdef CONFIG_PAX_REFCOUNT
3192 +" tvs %%icc, 6\n"
3193 +#endif
3194 +
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202 -static void inline arch_write_lock(arch_rwlock_t *lock)
3203 +static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3212 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3221 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225 diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_32.h linux-3.0.4/arch/sparc/include/asm/thread_info_32.h
3226 --- linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227 +++ linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228 @@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232 +
3233 + unsigned long lowest_stack;
3234 };
3235
3236 /*
3237 diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_64.h linux-3.0.4/arch/sparc/include/asm/thread_info_64.h
3238 --- linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239 +++ linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240 @@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244 + unsigned long lowest_stack;
3245 +
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249 diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_32.h linux-3.0.4/arch/sparc/include/asm/uaccess_32.h
3250 --- linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251 +++ linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256 - if (n && __access_ok((unsigned long) to, n))
3257 + if ((long)n < 0)
3258 + return n;
3259 +
3260 + if (n && __access_ok((unsigned long) to, n)) {
3261 + if (!__builtin_constant_p(n))
3262 + check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264 - else
3265 + } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271 + if ((long)n < 0)
3272 + return n;
3273 +
3274 + if (!__builtin_constant_p(n))
3275 + check_object_size(from, n, true);
3276 +
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282 - if (n && __access_ok((unsigned long) from, n))
3283 + if ((long)n < 0)
3284 + return n;
3285 +
3286 + if (n && __access_ok((unsigned long) from, n)) {
3287 + if (!__builtin_constant_p(n))
3288 + check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290 - else
3291 + } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297 + if ((long)n < 0)
3298 + return n;
3299 +
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303 diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_64.h linux-3.0.4/arch/sparc/include/asm/uaccess_64.h
3304 --- linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305 +++ linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306 @@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310 +#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318 - unsigned long ret = ___copy_from_user(to, from, size);
3319 + unsigned long ret;
3320
3321 + if ((long)size < 0 || size > INT_MAX)
3322 + return size;
3323 +
3324 + if (!__builtin_constant_p(size))
3325 + check_object_size(to, size, false);
3326 +
3327 + ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335 - unsigned long ret = ___copy_to_user(to, from, size);
3336 + unsigned long ret;
3337 +
3338 + if ((long)size < 0 || size > INT_MAX)
3339 + return size;
3340 +
3341 + if (!__builtin_constant_p(size))
3342 + check_object_size(from, size, true);
3343
3344 + ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348 diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess.h linux-3.0.4/arch/sparc/include/asm/uaccess.h
3349 --- linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350 +++ linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351 @@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354 +
3355 +#ifdef __KERNEL__
3356 +#ifndef __ASSEMBLY__
3357 +#include <linux/types.h>
3358 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359 +#endif
3360 +#endif
3361 +
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365 diff -urNp linux-3.0.4/arch/sparc/kernel/Makefile linux-3.0.4/arch/sparc/kernel/Makefile
3366 --- linux-3.0.4/arch/sparc/kernel/Makefile 2011-07-21 22:17:23.000000000 -0400
3367 +++ linux-3.0.4/arch/sparc/kernel/Makefile 2011-08-23 21:47:55.000000000 -0400
3368 @@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372 -ccflags-y := -Werror
3373 +#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377 diff -urNp linux-3.0.4/arch/sparc/kernel/process_32.c linux-3.0.4/arch/sparc/kernel/process_32.c
3378 --- linux-3.0.4/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379 +++ linux-3.0.4/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384 - printk("%pS\n", (void *) rw->ins[7]);
3385 + printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393 - printk("PC: <%pS>\n", (void *) r->pc);
3394 + printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410 - printk("%pS ] ", (void *) pc);
3411 + printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415 diff -urNp linux-3.0.4/arch/sparc/kernel/process_64.c linux-3.0.4/arch/sparc/kernel/process_64.c
3416 --- linux-3.0.4/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417 +++ linux-3.0.4/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3431 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453 diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c
3454 --- linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455 +++ linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460 - addr = TASK_UNMAPPED_BASE;
3461 + addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469 - if (!vmm || addr + len <= vmm->vm_start)
3470 + if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474 diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c
3475 --- linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476 +++ linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481 - if ((flags & MAP_SHARED) &&
3482 + if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490 +#ifdef CONFIG_PAX_RANDMMAP
3491 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492 +#endif
3493 +
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501 - if (task_size - len >= addr &&
3502 - (!vma || addr + len <= vma->vm_start))
3503 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508 - start_addr = addr = mm->free_area_cache;
3509 + start_addr = addr = mm->free_area_cache;
3510 } else {
3511 - start_addr = addr = TASK_UNMAPPED_BASE;
3512 + start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516 @@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520 - if (start_addr != TASK_UNMAPPED_BASE) {
3521 - start_addr = addr = TASK_UNMAPPED_BASE;
3522 + if (start_addr != mm->mmap_base) {
3523 + start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529 - if (likely(!vma || addr + len <= vma->vm_start)) {
3530 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538 - if ((flags & MAP_SHARED) &&
3539 + if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547 - if (task_size - len >= addr &&
3548 - (!vma || addr + len <= vma->vm_start))
3549 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557 - if (!vma || addr <= vma->vm_start) {
3558 + if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566 - addr = mm->mmap_base-len;
3567 - if (do_color_align)
3568 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569 + addr = mm->mmap_base - len;
3570
3571 do {
3572 + if (do_color_align)
3573 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580 - if (likely(!vma || addr+len <= vma->vm_start)) {
3581 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589 - addr = vma->vm_start-len;
3590 - if (do_color_align)
3591 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592 - } while (likely(len < vma->vm_start));
3593 + addr = skip_heap_stack_gap(vma, len);
3594 + } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602 +
3603 +#ifdef CONFIG_PAX_RANDMMAP
3604 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3605 + mm->mmap_base += mm->delta_mmap;
3606 +#endif
3607 +
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615 +
3616 +#ifdef CONFIG_PAX_RANDMMAP
3617 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3618 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619 +#endif
3620 +
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624 diff -urNp linux-3.0.4/arch/sparc/kernel/traps_32.c linux-3.0.4/arch/sparc/kernel/traps_32.c
3625 --- linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626 +++ linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631 +extern void gr_handle_kernel_exploit(void);
3632 +
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648 - if(regs->psr & PSR_PS)
3649 + if(regs->psr & PSR_PS) {
3650 + gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652 + }
3653 do_exit(SIGSEGV);
3654 }
3655
3656 diff -urNp linux-3.0.4/arch/sparc/kernel/traps_64.c linux-3.0.4/arch/sparc/kernel/traps_64.c
3657 --- linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658 +++ linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672 +
3673 +#ifdef CONFIG_PAX_REFCOUNT
3674 + if (lvl == 6)
3675 + pax_report_refcount_overflow(regs);
3676 +#endif
3677 +
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685 -
3686 +
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691 +#ifdef CONFIG_PAX_REFCOUNT
3692 + if (lvl == 6)
3693 + pax_report_refcount_overflow(regs);
3694 +#endif
3695 +
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703 - printk("TPC<%pS>\n", (void *) regs->tpc);
3704 + printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3755 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3762 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770 +extern void gr_handle_kernel_exploit(void);
3771 +
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788 - if (regs->tstate & TSTATE_PRIV)
3789 + if (regs->tstate & TSTATE_PRIV) {
3790 + gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792 + }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796 diff -urNp linux-3.0.4/arch/sparc/kernel/unaligned_64.c linux-3.0.4/arch/sparc/kernel/unaligned_64.c
3797 --- linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:44:40.000000000 -0400
3798 +++ linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808 diff -urNp linux-3.0.4/arch/sparc/lib/atomic_64.S linux-3.0.4/arch/sparc/lib/atomic_64.S
3809 --- linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810 +++ linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811 @@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815 - add %g1, %o0, %g7
3816 + addcc %g1, %o0, %g7
3817 +
3818 +#ifdef CONFIG_PAX_REFCOUNT
3819 + tvs %icc, 6
3820 +#endif
3821 +
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829 + .globl atomic_add_unchecked
3830 + .type atomic_add_unchecked,#function
3831 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832 + BACKOFF_SETUP(%o2)
3833 +1: lduw [%o1], %g1
3834 + add %g1, %o0, %g7
3835 + cas [%o1], %g1, %g7
3836 + cmp %g1, %g7
3837 + bne,pn %icc, 2f
3838 + nop
3839 + retl
3840 + nop
3841 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3842 + .size atomic_add_unchecked, .-atomic_add_unchecked
3843 +
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849 - sub %g1, %o0, %g7
3850 + subcc %g1, %o0, %g7
3851 +
3852 +#ifdef CONFIG_PAX_REFCOUNT
3853 + tvs %icc, 6
3854 +#endif
3855 +
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863 + .globl atomic_sub_unchecked
3864 + .type atomic_sub_unchecked,#function
3865 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866 + BACKOFF_SETUP(%o2)
3867 +1: lduw [%o1], %g1
3868 + sub %g1, %o0, %g7
3869 + cas [%o1], %g1, %g7
3870 + cmp %g1, %g7
3871 + bne,pn %icc, 2f
3872 + nop
3873 + retl
3874 + nop
3875 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3876 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877 +
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883 - add %g1, %o0, %g7
3884 + addcc %g1, %o0, %g7
3885 +
3886 +#ifdef CONFIG_PAX_REFCOUNT
3887 + tvs %icc, 6
3888 +#endif
3889 +
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897 + .globl atomic_add_ret_unchecked
3898 + .type atomic_add_ret_unchecked,#function
3899 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900 + BACKOFF_SETUP(%o2)
3901 +1: lduw [%o1], %g1
3902 + addcc %g1, %o0, %g7
3903 + cas [%o1], %g1, %g7
3904 + cmp %g1, %g7
3905 + bne,pn %icc, 2f
3906 + add %g7, %o0, %g7
3907 + sra %g7, 0, %o0
3908 + retl
3909 + nop
3910 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3911 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912 +
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918 - sub %g1, %o0, %g7
3919 + subcc %g1, %o0, %g7
3920 +
3921 +#ifdef CONFIG_PAX_REFCOUNT
3922 + tvs %icc, 6
3923 +#endif
3924 +
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932 - add %g1, %o0, %g7
3933 + addcc %g1, %o0, %g7
3934 +
3935 +#ifdef CONFIG_PAX_REFCOUNT
3936 + tvs %xcc, 6
3937 +#endif
3938 +
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946 + .globl atomic64_add_unchecked
3947 + .type atomic64_add_unchecked,#function
3948 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949 + BACKOFF_SETUP(%o2)
3950 +1: ldx [%o1], %g1
3951 + addcc %g1, %o0, %g7
3952 + casx [%o1], %g1, %g7
3953 + cmp %g1, %g7
3954 + bne,pn %xcc, 2f
3955 + nop
3956 + retl
3957 + nop
3958 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3959 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960 +
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966 - sub %g1, %o0, %g7
3967 + subcc %g1, %o0, %g7
3968 +
3969 +#ifdef CONFIG_PAX_REFCOUNT
3970 + tvs %xcc, 6
3971 +#endif
3972 +
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980 + .globl atomic64_sub_unchecked
3981 + .type atomic64_sub_unchecked,#function
3982 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983 + BACKOFF_SETUP(%o2)
3984 +1: ldx [%o1], %g1
3985 + subcc %g1, %o0, %g7
3986 + casx [%o1], %g1, %g7
3987 + cmp %g1, %g7
3988 + bne,pn %xcc, 2f
3989 + nop
3990 + retl
3991 + nop
3992 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3993 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994 +
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000 - add %g1, %o0, %g7
4001 + addcc %g1, %o0, %g7
4002 +
4003 +#ifdef CONFIG_PAX_REFCOUNT
4004 + tvs %xcc, 6
4005 +#endif
4006 +
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014 + .globl atomic64_add_ret_unchecked
4015 + .type atomic64_add_ret_unchecked,#function
4016 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017 + BACKOFF_SETUP(%o2)
4018 +1: ldx [%o1], %g1
4019 + addcc %g1, %o0, %g7
4020 + casx [%o1], %g1, %g7
4021 + cmp %g1, %g7
4022 + bne,pn %xcc, 2f
4023 + add %g7, %o0, %g7
4024 + mov %g7, %o0
4025 + retl
4026 + nop
4027 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4028 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029 +
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035 - sub %g1, %o0, %g7
4036 + subcc %g1, %o0, %g7
4037 +
4038 +#ifdef CONFIG_PAX_REFCOUNT
4039 + tvs %xcc, 6
4040 +#endif
4041 +
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045 diff -urNp linux-3.0.4/arch/sparc/lib/ksyms.c linux-3.0.4/arch/sparc/lib/ksyms.c
4046 --- linux-3.0.4/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047 +++ linux-3.0.4/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052 +EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056 +EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059 +EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067 diff -urNp linux-3.0.4/arch/sparc/lib/Makefile linux-3.0.4/arch/sparc/lib/Makefile
4068 --- linux-3.0.4/arch/sparc/lib/Makefile 2011-08-23 21:44:40.000000000 -0400
4069 +++ linux-3.0.4/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070 @@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074 -ccflags-y := -Werror
4075 +#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079 diff -urNp linux-3.0.4/arch/sparc/Makefile linux-3.0.4/arch/sparc/Makefile
4080 --- linux-3.0.4/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081 +++ linux-3.0.4/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091 diff -urNp linux-3.0.4/arch/sparc/mm/fault_32.c linux-3.0.4/arch/sparc/mm/fault_32.c
4092 --- linux-3.0.4/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093 +++ linux-3.0.4/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094 @@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098 +#include <linux/slab.h>
4099 +#include <linux/pagemap.h>
4100 +#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108 +#ifdef CONFIG_PAX_PAGEEXEC
4109 +#ifdef CONFIG_PAX_DLRESOLVE
4110 +static void pax_emuplt_close(struct vm_area_struct *vma)
4111 +{
4112 + vma->vm_mm->call_dl_resolve = 0UL;
4113 +}
4114 +
4115 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116 +{
4117 + unsigned int *kaddr;
4118 +
4119 + vmf->page = alloc_page(GFP_HIGHUSER);
4120 + if (!vmf->page)
4121 + return VM_FAULT_OOM;
4122 +
4123 + kaddr = kmap(vmf->page);
4124 + memset(kaddr, 0, PAGE_SIZE);
4125 + kaddr[0] = 0x9DE3BFA8U; /* save */
4126 + flush_dcache_page(vmf->page);
4127 + kunmap(vmf->page);
4128 + return VM_FAULT_MAJOR;
4129 +}
4130 +
4131 +static const struct vm_operations_struct pax_vm_ops = {
4132 + .close = pax_emuplt_close,
4133 + .fault = pax_emuplt_fault
4134 +};
4135 +
4136 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137 +{
4138 + int ret;
4139 +
4140 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4141 + vma->vm_mm = current->mm;
4142 + vma->vm_start = addr;
4143 + vma->vm_end = addr + PAGE_SIZE;
4144 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146 + vma->vm_ops = &pax_vm_ops;
4147 +
4148 + ret = insert_vm_struct(current->mm, vma);
4149 + if (ret)
4150 + return ret;
4151 +
4152 + ++current->mm->total_vm;
4153 + return 0;
4154 +}
4155 +#endif
4156 +
4157 +/*
4158 + * PaX: decide what to do with offenders (regs->pc = fault address)
4159 + *
4160 + * returns 1 when task should be killed
4161 + * 2 when patched PLT trampoline was detected
4162 + * 3 when unpatched PLT trampoline was detected
4163 + */
4164 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4165 +{
4166 +
4167 +#ifdef CONFIG_PAX_EMUPLT
4168 + int err;
4169 +
4170 + do { /* PaX: patched PLT emulation #1 */
4171 + unsigned int sethi1, sethi2, jmpl;
4172 +
4173 + err = get_user(sethi1, (unsigned int *)regs->pc);
4174 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176 +
4177 + if (err)
4178 + break;
4179 +
4180 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183 + {
4184 + unsigned int addr;
4185 +
4186 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187 + addr = regs->u_regs[UREG_G1];
4188 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189 + regs->pc = addr;
4190 + regs->npc = addr+4;
4191 + return 2;
4192 + }
4193 + } while (0);
4194 +
4195 + { /* PaX: patched PLT emulation #2 */
4196 + unsigned int ba;
4197 +
4198 + err = get_user(ba, (unsigned int *)regs->pc);
4199 +
4200 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201 + unsigned int addr;
4202 +
4203 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204 + regs->pc = addr;
4205 + regs->npc = addr+4;
4206 + return 2;
4207 + }
4208 + }
4209 +
4210 + do { /* PaX: patched PLT emulation #3 */
4211 + unsigned int sethi, jmpl, nop;
4212 +
4213 + err = get_user(sethi, (unsigned int *)regs->pc);
4214 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216 +
4217 + if (err)
4218 + break;
4219 +
4220 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222 + nop == 0x01000000U)
4223 + {
4224 + unsigned int addr;
4225 +
4226 + addr = (sethi & 0x003FFFFFU) << 10;
4227 + regs->u_regs[UREG_G1] = addr;
4228 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229 + regs->pc = addr;
4230 + regs->npc = addr+4;
4231 + return 2;
4232 + }
4233 + } while (0);
4234 +
4235 + do { /* PaX: unpatched PLT emulation step 1 */
4236 + unsigned int sethi, ba, nop;
4237 +
4238 + err = get_user(sethi, (unsigned int *)regs->pc);
4239 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241 +
4242 + if (err)
4243 + break;
4244 +
4245 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247 + nop == 0x01000000U)
4248 + {
4249 + unsigned int addr, save, call;
4250 +
4251 + if ((ba & 0xFFC00000U) == 0x30800000U)
4252 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253 + else
4254 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255 +
4256 + err = get_user(save, (unsigned int *)addr);
4257 + err |= get_user(call, (unsigned int *)(addr+4));
4258 + err |= get_user(nop, (unsigned int *)(addr+8));
4259 + if (err)
4260 + break;
4261 +
4262 +#ifdef CONFIG_PAX_DLRESOLVE
4263 + if (save == 0x9DE3BFA8U &&
4264 + (call & 0xC0000000U) == 0x40000000U &&
4265 + nop == 0x01000000U)
4266 + {
4267 + struct vm_area_struct *vma;
4268 + unsigned long call_dl_resolve;
4269 +
4270 + down_read(&current->mm->mmap_sem);
4271 + call_dl_resolve = current->mm->call_dl_resolve;
4272 + up_read(&current->mm->mmap_sem);
4273 + if (likely(call_dl_resolve))
4274 + goto emulate;
4275 +
4276 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277 +
4278 + down_write(&current->mm->mmap_sem);
4279 + if (current->mm->call_dl_resolve) {
4280 + call_dl_resolve = current->mm->call_dl_resolve;
4281 + up_write(&current->mm->mmap_sem);
4282 + if (vma)
4283 + kmem_cache_free(vm_area_cachep, vma);
4284 + goto emulate;
4285 + }
4286 +
4287 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289 + up_write(&current->mm->mmap_sem);
4290 + if (vma)
4291 + kmem_cache_free(vm_area_cachep, vma);
4292 + return 1;
4293 + }
4294 +
4295 + if (pax_insert_vma(vma, call_dl_resolve)) {
4296 + up_write(&current->mm->mmap_sem);
4297 + kmem_cache_free(vm_area_cachep, vma);
4298 + return 1;
4299 + }
4300 +
4301 + current->mm->call_dl_resolve = call_dl_resolve;
4302 + up_write(&current->mm->mmap_sem);
4303 +
4304 +emulate:
4305 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306 + regs->pc = call_dl_resolve;
4307 + regs->npc = addr+4;
4308 + return 3;
4309 + }
4310 +#endif
4311 +
4312 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313 + if ((save & 0xFFC00000U) == 0x05000000U &&
4314 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4315 + nop == 0x01000000U)
4316 + {
4317 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318 + regs->u_regs[UREG_G2] = addr + 4;
4319 + addr = (save & 0x003FFFFFU) << 10;
4320 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321 + regs->pc = addr;
4322 + regs->npc = addr+4;
4323 + return 3;
4324 + }
4325 + }
4326 + } while (0);
4327 +
4328 + do { /* PaX: unpatched PLT emulation step 2 */
4329 + unsigned int save, call, nop;
4330 +
4331 + err = get_user(save, (unsigned int *)(regs->pc-4));
4332 + err |= get_user(call, (unsigned int *)regs->pc);
4333 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334 + if (err)
4335 + break;
4336 +
4337 + if (save == 0x9DE3BFA8U &&
4338 + (call & 0xC0000000U) == 0x40000000U &&
4339 + nop == 0x01000000U)
4340 + {
4341 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342 +
4343 + regs->u_regs[UREG_RETPC] = regs->pc;
4344 + regs->pc = dl_resolve;
4345 + regs->npc = dl_resolve+4;
4346 + return 3;
4347 + }
4348 + } while (0);
4349 +#endif
4350 +
4351 + return 1;
4352 +}
4353 +
4354 +void pax_report_insns(void *pc, void *sp)
4355 +{
4356 + unsigned long i;
4357 +
4358 + printk(KERN_ERR "PAX: bytes at PC: ");
4359 + for (i = 0; i < 8; i++) {
4360 + unsigned int c;
4361 + if (get_user(c, (unsigned int *)pc+i))
4362 + printk(KERN_CONT "???????? ");
4363 + else
4364 + printk(KERN_CONT "%08x ", c);
4365 + }
4366 + printk("\n");
4367 +}
4368 +#endif
4369 +
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373 @@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377 +
4378 +#ifdef CONFIG_PAX_PAGEEXEC
4379 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380 + up_read(&mm->mmap_sem);
4381 + switch (pax_handle_fetch_fault(regs)) {
4382 +
4383 +#ifdef CONFIG_PAX_EMUPLT
4384 + case 2:
4385 + case 3:
4386 + return;
4387 +#endif
4388 +
4389 + }
4390 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391 + do_group_exit(SIGKILL);
4392 + }
4393 +#endif
4394 +
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398 diff -urNp linux-3.0.4/arch/sparc/mm/fault_64.c linux-3.0.4/arch/sparc/mm/fault_64.c
4399 --- linux-3.0.4/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400 +++ linux-3.0.4/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401 @@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405 +#include <linux/slab.h>
4406 +#include <linux/pagemap.h>
4407 +#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424 +#ifdef CONFIG_PAX_PAGEEXEC
4425 +#ifdef CONFIG_PAX_DLRESOLVE
4426 +static void pax_emuplt_close(struct vm_area_struct *vma)
4427 +{
4428 + vma->vm_mm->call_dl_resolve = 0UL;
4429 +}
4430 +
4431 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432 +{
4433 + unsigned int *kaddr;
4434 +
4435 + vmf->page = alloc_page(GFP_HIGHUSER);
4436 + if (!vmf->page)
4437 + return VM_FAULT_OOM;
4438 +
4439 + kaddr = kmap(vmf->page);
4440 + memset(kaddr, 0, PAGE_SIZE);
4441 + kaddr[0] = 0x9DE3BFA8U; /* save */
4442 + flush_dcache_page(vmf->page);
4443 + kunmap(vmf->page);
4444 + return VM_FAULT_MAJOR;
4445 +}
4446 +
4447 +static const struct vm_operations_struct pax_vm_ops = {
4448 + .close = pax_emuplt_close,
4449 + .fault = pax_emuplt_fault
4450 +};
4451 +
4452 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453 +{
4454 + int ret;
4455 +
4456 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4457 + vma->vm_mm = current->mm;
4458 + vma->vm_start = addr;
4459 + vma->vm_end = addr + PAGE_SIZE;
4460 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462 + vma->vm_ops = &pax_vm_ops;
4463 +
4464 + ret = insert_vm_struct(current->mm, vma);
4465 + if (ret)
4466 + return ret;
4467 +
4468 + ++current->mm->total_vm;
4469 + return 0;
4470 +}
4471 +#endif
4472 +
4473 +/*
4474 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4475 + *
4476 + * returns 1 when task should be killed
4477 + * 2 when patched PLT trampoline was detected
4478 + * 3 when unpatched PLT trampoline was detected
4479 + */
4480 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4481 +{
4482 +
4483 +#ifdef CONFIG_PAX_EMUPLT
4484 + int err;
4485 +
4486 + do { /* PaX: patched PLT emulation #1 */
4487 + unsigned int sethi1, sethi2, jmpl;
4488 +
4489 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4490 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492 +
4493 + if (err)
4494 + break;
4495 +
4496 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499 + {
4500 + unsigned long addr;
4501 +
4502 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503 + addr = regs->u_regs[UREG_G1];
4504 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505 +
4506 + if (test_thread_flag(TIF_32BIT))
4507 + addr &= 0xFFFFFFFFUL;
4508 +
4509 + regs->tpc = addr;
4510 + regs->tnpc = addr+4;
4511 + return 2;
4512 + }
4513 + } while (0);
4514 +
4515 + { /* PaX: patched PLT emulation #2 */
4516 + unsigned int ba;
4517 +
4518 + err = get_user(ba, (unsigned int *)regs->tpc);
4519 +
4520 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521 + unsigned long addr;
4522 +
4523 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524 +
4525 + if (test_thread_flag(TIF_32BIT))
4526 + addr &= 0xFFFFFFFFUL;
4527 +
4528 + regs->tpc = addr;
4529 + regs->tnpc = addr+4;
4530 + return 2;
4531 + }
4532 + }
4533 +
4534 + do { /* PaX: patched PLT emulation #3 */
4535 + unsigned int sethi, jmpl, nop;
4536 +
4537 + err = get_user(sethi, (unsigned int *)regs->tpc);
4538 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540 +
4541 + if (err)
4542 + break;
4543 +
4544 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546 + nop == 0x01000000U)
4547 + {
4548 + unsigned long addr;
4549 +
4550 + addr = (sethi & 0x003FFFFFU) << 10;
4551 + regs->u_regs[UREG_G1] = addr;
4552 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553 +
4554 + if (test_thread_flag(TIF_32BIT))
4555 + addr &= 0xFFFFFFFFUL;
4556 +
4557 + regs->tpc = addr;
4558 + regs->tnpc = addr+4;
4559 + return 2;
4560 + }
4561 + } while (0);
4562 +
4563 + do { /* PaX: patched PLT emulation #4 */
4564 + unsigned int sethi, mov1, call, mov2;
4565 +
4566 + err = get_user(sethi, (unsigned int *)regs->tpc);
4567 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570 +
4571 + if (err)
4572 + break;
4573 +
4574 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575 + mov1 == 0x8210000FU &&
4576 + (call & 0xC0000000U) == 0x40000000U &&
4577 + mov2 == 0x9E100001U)
4578 + {
4579 + unsigned long addr;
4580 +
4581 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583 +
4584 + if (test_thread_flag(TIF_32BIT))
4585 + addr &= 0xFFFFFFFFUL;
4586 +
4587 + regs->tpc = addr;
4588 + regs->tnpc = addr+4;
4589 + return 2;
4590 + }
4591 + } while (0);
4592 +
4593 + do { /* PaX: patched PLT emulation #5 */
4594 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595 +
4596 + err = get_user(sethi, (unsigned int *)regs->tpc);
4597 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604 +
4605 + if (err)
4606 + break;
4607 +
4608 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4612 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613 + sllx == 0x83287020U &&
4614 + jmpl == 0x81C04005U &&
4615 + nop == 0x01000000U)
4616 + {
4617 + unsigned long addr;
4618 +
4619 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620 + regs->u_regs[UREG_G1] <<= 32;
4621 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623 + regs->tpc = addr;
4624 + regs->tnpc = addr+4;
4625 + return 2;
4626 + }
4627 + } while (0);
4628 +
4629 + do { /* PaX: patched PLT emulation #6 */
4630 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631 +
4632 + err = get_user(sethi, (unsigned int *)regs->tpc);
4633 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639 +
4640 + if (err)
4641 + break;
4642 +
4643 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646 + sllx == 0x83287020U &&
4647 + (or & 0xFFFFE000U) == 0x8A116000U &&
4648 + jmpl == 0x81C04005U &&
4649 + nop == 0x01000000U)
4650 + {
4651 + unsigned long addr;
4652 +
4653 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654 + regs->u_regs[UREG_G1] <<= 32;
4655 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657 + regs->tpc = addr;
4658 + regs->tnpc = addr+4;
4659 + return 2;
4660 + }
4661 + } while (0);
4662 +
4663 + do { /* PaX: unpatched PLT emulation step 1 */
4664 + unsigned int sethi, ba, nop;
4665 +
4666 + err = get_user(sethi, (unsigned int *)regs->tpc);
4667 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669 +
4670 + if (err)
4671 + break;
4672 +
4673 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675 + nop == 0x01000000U)
4676 + {
4677 + unsigned long addr;
4678 + unsigned int save, call;
4679 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680 +
4681 + if ((ba & 0xFFC00000U) == 0x30800000U)
4682 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683 + else
4684 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685 +
4686 + if (test_thread_flag(TIF_32BIT))
4687 + addr &= 0xFFFFFFFFUL;
4688 +
4689 + err = get_user(save, (unsigned int *)addr);
4690 + err |= get_user(call, (unsigned int *)(addr+4));
4691 + err |= get_user(nop, (unsigned int *)(addr+8));
4692 + if (err)
4693 + break;
4694 +
4695 +#ifdef CONFIG_PAX_DLRESOLVE
4696 + if (save == 0x9DE3BFA8U &&
4697 + (call & 0xC0000000U) == 0x40000000U &&
4698 + nop == 0x01000000U)
4699 + {
4700 + struct vm_area_struct *vma;
4701 + unsigned long call_dl_resolve;
4702 +
4703 + down_read(&current->mm->mmap_sem);
4704 + call_dl_resolve = current->mm->call_dl_resolve;
4705 + up_read(&current->mm->mmap_sem);
4706 + if (likely(call_dl_resolve))
4707 + goto emulate;
4708 +
4709 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710 +
4711 + down_write(&current->mm->mmap_sem);
4712 + if (current->mm->call_dl_resolve) {
4713 + call_dl_resolve = current->mm->call_dl_resolve;
4714 + up_write(&current->mm->mmap_sem);
4715 + if (vma)
4716 + kmem_cache_free(vm_area_cachep, vma);
4717 + goto emulate;
4718 + }
4719 +
4720 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722 + up_write(&current->mm->mmap_sem);
4723 + if (vma)
4724 + kmem_cache_free(vm_area_cachep, vma);
4725 + return 1;
4726 + }
4727 +
4728 + if (pax_insert_vma(vma, call_dl_resolve)) {
4729 + up_write(&current->mm->mmap_sem);
4730 + kmem_cache_free(vm_area_cachep, vma);
4731 + return 1;
4732 + }
4733 +
4734 + current->mm->call_dl_resolve = call_dl_resolve;
4735 + up_write(&current->mm->mmap_sem);
4736 +
4737 +emulate:
4738 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739 + regs->tpc = call_dl_resolve;
4740 + regs->tnpc = addr+4;
4741 + return 3;
4742 + }
4743 +#endif
4744 +
4745 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746 + if ((save & 0xFFC00000U) == 0x05000000U &&
4747 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4748 + nop == 0x01000000U)
4749 + {
4750 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751 + regs->u_regs[UREG_G2] = addr + 4;
4752 + addr = (save & 0x003FFFFFU) << 10;
4753 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754 +
4755 + if (test_thread_flag(TIF_32BIT))
4756 + addr &= 0xFFFFFFFFUL;
4757 +
4758 + regs->tpc = addr;
4759 + regs->tnpc = addr+4;
4760 + return 3;
4761 + }
4762 +
4763 + /* PaX: 64-bit PLT stub */
4764 + err = get_user(sethi1, (unsigned int *)addr);
4765 + err |= get_user(sethi2, (unsigned int *)(addr+4));
4766 + err |= get_user(or1, (unsigned int *)(addr+8));
4767 + err |= get_user(or2, (unsigned int *)(addr+12));
4768 + err |= get_user(sllx, (unsigned int *)(addr+16));
4769 + err |= get_user(add, (unsigned int *)(addr+20));
4770 + err |= get_user(jmpl, (unsigned int *)(addr+24));
4771 + err |= get_user(nop, (unsigned int *)(addr+28));
4772 + if (err)
4773 + break;
4774 +
4775 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777 + (or1 & 0xFFFFE000U) == 0x88112000U &&
4778 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779 + sllx == 0x89293020U &&
4780 + add == 0x8A010005U &&
4781 + jmpl == 0x89C14000U &&
4782 + nop == 0x01000000U)
4783 + {
4784 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786 + regs->u_regs[UREG_G4] <<= 32;
4787 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789 + regs->u_regs[UREG_G4] = addr + 24;
4790 + addr = regs->u_regs[UREG_G5];
4791 + regs->tpc = addr;
4792 + regs->tnpc = addr+4;
4793 + return 3;
4794 + }
4795 + }
4796 + } while (0);
4797 +
4798 +#ifdef CONFIG_PAX_DLRESOLVE
4799 + do { /* PaX: unpatched PLT emulation step 2 */
4800 + unsigned int save, call, nop;
4801 +
4802 + err = get_user(save, (unsigned int *)(regs->tpc-4));
4803 + err |= get_user(call, (unsigned int *)regs->tpc);
4804 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805 + if (err)
4806 + break;
4807 +
4808 + if (save == 0x9DE3BFA8U &&
4809 + (call & 0xC0000000U) == 0x40000000U &&
4810 + nop == 0x01000000U)
4811 + {
4812 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813 +
4814 + if (test_thread_flag(TIF_32BIT))
4815 + dl_resolve &= 0xFFFFFFFFUL;
4816 +
4817 + regs->u_regs[UREG_RETPC] = regs->tpc;
4818 + regs->tpc = dl_resolve;
4819 + regs->tnpc = dl_resolve+4;
4820 + return 3;
4821 + }
4822 + } while (0);
4823 +#endif
4824 +
4825 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826 + unsigned int sethi, ba, nop;
4827 +
4828 + err = get_user(sethi, (unsigned int *)regs->tpc);
4829 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831 +
4832 + if (err)
4833 + break;
4834 +
4835 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836 + (ba & 0xFFF00000U) == 0x30600000U &&
4837 + nop == 0x01000000U)
4838 + {
4839 + unsigned long addr;
4840 +
4841 + addr = (sethi & 0x003FFFFFU) << 10;
4842 + regs->u_regs[UREG_G1] = addr;
4843 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844 +
4845 + if (test_thread_flag(TIF_32BIT))
4846 + addr &= 0xFFFFFFFFUL;
4847 +
4848 + regs->tpc = addr;
4849 + regs->tnpc = addr+4;
4850 + return 2;
4851 + }
4852 + } while (0);
4853 +
4854 +#endif
4855 +
4856 + return 1;
4857 +}
4858 +
4859 +void pax_report_insns(void *pc, void *sp)
4860 +{
4861 + unsigned long i;
4862 +
4863 + printk(KERN_ERR "PAX: bytes at PC: ");
4864 + for (i = 0; i < 8; i++) {
4865 + unsigned int c;
4866 + if (get_user(c, (unsigned int *)pc+i))
4867 + printk(KERN_CONT "???????? ");
4868 + else
4869 + printk(KERN_CONT "%08x ", c);
4870 + }
4871 + printk("\n");
4872 +}
4873 +#endif
4874 +
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882 +#ifdef CONFIG_PAX_PAGEEXEC
4883 + /* PaX: detect ITLB misses on non-exec pages */
4884 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886 + {
4887 + if (address != regs->tpc)
4888 + goto good_area;
4889 +
4890 + up_read(&mm->mmap_sem);
4891 + switch (pax_handle_fetch_fault(regs)) {
4892 +
4893 +#ifdef CONFIG_PAX_EMUPLT
4894 + case 2:
4895 + case 3:
4896 + return;
4897 +#endif
4898 +
4899 + }
4900 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901 + do_group_exit(SIGKILL);
4902 + }
4903 +#endif
4904 +
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908 diff -urNp linux-3.0.4/arch/sparc/mm/hugetlbpage.c linux-3.0.4/arch/sparc/mm/hugetlbpage.c
4909 --- linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910 +++ linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911 @@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915 - if (likely(!vma || addr + len <= vma->vm_start)) {
4916 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924 - if (!vma || addr <= vma->vm_start) {
4925 + if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933 - addr = (mm->mmap_base-len) & HPAGE_MASK;
4934 + addr = mm->mmap_base - len;
4935
4936 do {
4937 + addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944 - if (likely(!vma || addr+len <= vma->vm_start)) {
4945 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953 - addr = (vma->vm_start-len) & HPAGE_MASK;
4954 - } while (likely(len < vma->vm_start));
4955 + addr = skip_heap_stack_gap(vma, len);
4956 + } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964 - if (task_size - len >= addr &&
4965 - (!vma || addr + len <= vma->vm_start))
4966 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970 diff -urNp linux-3.0.4/arch/sparc/mm/init_32.c linux-3.0.4/arch/sparc/mm/init_32.c
4971 --- linux-3.0.4/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972 +++ linux-3.0.4/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973 @@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979 +
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983 @@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987 - protection_map[1] = PAGE_READONLY;
4988 - protection_map[2] = PAGE_COPY;
4989 - protection_map[3] = PAGE_COPY;
4990 + protection_map[1] = PAGE_READONLY_NOEXEC;
4991 + protection_map[2] = PAGE_COPY_NOEXEC;
4992 + protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998 - protection_map[9] = PAGE_READONLY;
4999 - protection_map[10] = PAGE_SHARED;
5000 - protection_map[11] = PAGE_SHARED;
5001 + protection_map[9] = PAGE_READONLY_NOEXEC;
5002 + protection_map[10] = PAGE_SHARED_NOEXEC;
5003 + protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007 diff -urNp linux-3.0.4/arch/sparc/mm/Makefile linux-3.0.4/arch/sparc/mm/Makefile
5008 --- linux-3.0.4/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009 +++ linux-3.0.4/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010 @@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014 -ccflags-y := -Werror
5015 +#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019 diff -urNp linux-3.0.4/arch/sparc/mm/srmmu.c linux-3.0.4/arch/sparc/mm/srmmu.c
5020 --- linux-3.0.4/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021 +++ linux-3.0.4/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026 +
5027 +#ifdef CONFIG_PAX_PAGEEXEC
5028 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031 +#endif
5032 +
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036 diff -urNp linux-3.0.4/arch/um/include/asm/kmap_types.h linux-3.0.4/arch/um/include/asm/kmap_types.h
5037 --- linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038 +++ linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039 @@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043 + KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047 diff -urNp linux-3.0.4/arch/um/include/asm/page.h linux-3.0.4/arch/um/include/asm/page.h
5048 --- linux-3.0.4/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049 +++ linux-3.0.4/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050 @@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054 +#define ktla_ktva(addr) (addr)
5055 +#define ktva_ktla(addr) (addr)
5056 +
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060 diff -urNp linux-3.0.4/arch/um/kernel/process.c linux-3.0.4/arch/um/kernel/process.c
5061 --- linux-3.0.4/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062 +++ linux-3.0.4/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067 -/*
5068 - * Only x86 and x86_64 have an arch_align_stack().
5069 - * All other arches have "#define arch_align_stack(x) (x)"
5070 - * in their asm/system.h
5071 - * As this is included in UML from asm-um/system-generic.h,
5072 - * we can use it to behave as the subarch does.
5073 - */
5074 -#ifndef arch_align_stack
5075 -unsigned long arch_align_stack(unsigned long sp)
5076 -{
5077 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078 - sp -= get_random_int() % 8192;
5079 - return sp & ~0xf;
5080 -}
5081 -#endif
5082 -
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086 diff -urNp linux-3.0.4/arch/um/sys-i386/syscalls.c linux-3.0.4/arch/um/sys-i386/syscalls.c
5087 --- linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088 +++ linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089 @@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094 +{
5095 + unsigned long pax_task_size = TASK_SIZE;
5096 +
5097 +#ifdef CONFIG_PAX_SEGMEXEC
5098 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099 + pax_task_size = SEGMEXEC_TASK_SIZE;
5100 +#endif
5101 +
5102 + if (len > pax_task_size || addr > pax_task_size - len)
5103 + return -EINVAL;
5104 +
5105 + return 0;
5106 +}
5107 +
5108 /*
5109 * The prototype on i386 is:
5110 *
5111 diff -urNp linux-3.0.4/arch/x86/boot/bitops.h linux-3.0.4/arch/x86/boot/bitops.h
5112 --- linux-3.0.4/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113 +++ linux-3.0.4/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132 diff -urNp linux-3.0.4/arch/x86/boot/boot.h linux-3.0.4/arch/x86/boot/boot.h
5133 --- linux-3.0.4/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134 +++ linux-3.0.4/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139 - asm("movw %%ds,%0" : "=rm" (seg));
5140 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148 - asm("repe; cmpsb; setnz %0"
5149 + asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153 diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_32.S linux-3.0.4/arch/x86/boot/compressed/head_32.S
5154 --- linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155 +++ linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160 - movl $LOAD_PHYSICAL_ADDR, %ebx
5161 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165 @@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169 - subl $LOAD_PHYSICAL_ADDR, %ebx
5170 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174 @@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178 - testl %ecx, %ecx
5179 - jz 2f
5180 + jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184 diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_64.S linux-3.0.4/arch/x86/boot/compressed/head_64.S
5185 --- linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186 +++ linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191 - movl $LOAD_PHYSICAL_ADDR, %ebx
5192 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200 - movq $LOAD_PHYSICAL_ADDR, %rbp
5201 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205 diff -urNp linux-3.0.4/arch/x86/boot/compressed/Makefile linux-3.0.4/arch/x86/boot/compressed/Makefile
5206 --- linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207 +++ linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212 +ifdef CONSTIFY_PLUGIN
5213 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214 +endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218 diff -urNp linux-3.0.4/arch/x86/boot/compressed/misc.c linux-3.0.4/arch/x86/boot/compressed/misc.c
5219 --- linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220 +++ linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239 diff -urNp linux-3.0.4/arch/x86/boot/compressed/relocs.c linux-3.0.4/arch/x86/boot/compressed/relocs.c
5240 --- linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241 +++ linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242 @@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246 +#include "../../../../include/generated/autoconf.h"
5247 +
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250 +static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258 +static void read_phdrs(FILE *fp)
5259 +{
5260 + unsigned int i;
5261 +
5262 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263 + if (!phdr) {
5264 + die("Unable to allocate %d program headers\n",
5265 + ehdr.e_phnum);
5266 + }
5267 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268 + die("Seek to %d failed: %s\n",
5269 + ehdr.e_phoff, strerror(errno));
5270 + }
5271 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272 + die("Cannot read ELF program headers: %s\n",
5273 + strerror(errno));
5274 + }
5275 + for(i = 0; i < ehdr.e_phnum; i++) {
5276 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284 + }
5285 +
5286 +}
5287 +
5288 static void read_shdrs(FILE *fp)
5289 {
5290 - int i;
5291 + unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299 - int i;
5300 + unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308 - int i,j;
5309 + unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317 - int i,j;
5318 + unsigned int i,j;
5319 + uint32_t base;
5320 +
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328 + base = 0;
5329 + for (j = 0; j < ehdr.e_phnum; j++) {
5330 + if (phdr[j].p_type != PT_LOAD )
5331 + continue;
5332 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333 + continue;
5334 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335 + break;
5336 + }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5340 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348 - int i;
5349 + unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356 - int j;
5357 + unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365 - int i, printed = 0;
5366 + unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373 - int j;
5374 + unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382 - int i;
5383 + unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389 - int j;
5390 + unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400 + continue;
5401 +
5402 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405 + continue;
5406 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407 + continue;
5408 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409 + continue;
5410 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411 + continue;
5412 +#endif
5413 +
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421 - int i;
5422 + unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430 + read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434 diff -urNp linux-3.0.4/arch/x86/boot/cpucheck.c linux-3.0.4/arch/x86/boot/cpucheck.c
5435 --- linux-3.0.4/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436 +++ linux-3.0.4/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437 @@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441 - asm("movl %%cr0,%0" : "=r" (cr0));
5442 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450 - asm("pushfl ; "
5451 + asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455 @@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459 - asm("cpuid"
5460 + asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464 @@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468 - asm("cpuid"
5469 + asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473 @@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477 - asm("cpuid"
5478 + asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482 @@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486 - asm("cpuid"
5487 + asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521 - asm("cpuid"
5522 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524 + asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532 diff -urNp linux-3.0.4/arch/x86/boot/header.S linux-3.0.4/arch/x86/boot/header.S
5533 --- linux-3.0.4/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534 +++ linux-3.0.4/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544 diff -urNp linux-3.0.4/arch/x86/boot/Makefile linux-3.0.4/arch/x86/boot/Makefile
5545 --- linux-3.0.4/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546 +++ linux-3.0.4/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551 +ifdef CONSTIFY_PLUGIN
5552 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553 +endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557 diff -urNp linux-3.0.4/arch/x86/boot/memory.c linux-3.0.4/arch/x86/boot/memory.c
5558 --- linux-3.0.4/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559 +++ linux-3.0.4/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560 @@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564 - int count = 0;
5565 + unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569 diff -urNp linux-3.0.4/arch/x86/boot/video.c linux-3.0.4/arch/x86/boot/video.c
5570 --- linux-3.0.4/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571 +++ linux-3.0.4/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576 - int i, len = 0;
5577 + unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581 diff -urNp linux-3.0.4/arch/x86/boot/video-vesa.c linux-3.0.4/arch/x86/boot/video-vesa.c
5582 --- linux-3.0.4/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583 +++ linux-3.0.4/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588 + boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592 diff -urNp linux-3.0.4/arch/x86/ia32/ia32_aout.c linux-3.0.4/arch/x86/ia32/ia32_aout.c
5593 --- linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5594 +++ linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5595 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5596 unsigned long dump_start, dump_size;
5597 struct user32 dump;
5598
5599 + memset(&dump, 0, sizeof(dump));
5600 +
5601 fs = get_fs();
5602 set_fs(KERNEL_DS);
5603 has_dumped = 1;
5604 diff -urNp linux-3.0.4/arch/x86/ia32/ia32entry.S linux-3.0.4/arch/x86/ia32/ia32entry.S
5605 --- linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5606 +++ linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-08-25 17:36:37.000000000 -0400
5607 @@ -13,6 +13,7 @@
5608 #include <asm/thread_info.h>
5609 #include <asm/segment.h>
5610 #include <asm/irqflags.h>
5611 +#include <asm/pgtable.h>
5612 #include <linux/linkage.h>
5613
5614 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5615 @@ -95,6 +96,29 @@ ENTRY(native_irq_enable_sysexit)
5616 ENDPROC(native_irq_enable_sysexit)
5617 #endif
5618
5619 + .macro pax_enter_kernel_user
5620 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5621 + call pax_enter_kernel_user
5622 +#endif
5623 + .endm
5624 +
5625 + .macro pax_exit_kernel_user
5626 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5627 + call pax_exit_kernel_user
5628 +#endif
5629 +#ifdef CONFIG_PAX_RANDKSTACK
5630 + pushq %rax
5631 + call pax_randomize_kstack
5632 + popq %rax
5633 +#endif
5634 + .endm
5635 +
5636 + .macro pax_erase_kstack
5637 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5638 + call pax_erase_kstack
5639 +#endif
5640 + .endm
5641 +
5642 /*
5643 * 32bit SYSENTER instruction entry.
5644 *
5645 @@ -121,7 +145,7 @@ ENTRY(ia32_sysenter_target)
5646 CFI_REGISTER rsp,rbp
5647 SWAPGS_UNSAFE_STACK
5648 movq PER_CPU_VAR(kernel_stack), %rsp
5649 - addq $(KERNEL_STACK_OFFSET),%rsp
5650 + pax_enter_kernel_user
5651 /*
5652 * No need to follow this irqs on/off section: the syscall
5653 * disabled irqs, here we enable it straight after entry:
5654 @@ -134,7 +158,8 @@ ENTRY(ia32_sysenter_target)
5655 CFI_REL_OFFSET rsp,0
5656 pushfq_cfi
5657 /*CFI_REL_OFFSET rflags,0*/
5658 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5659 + GET_THREAD_INFO(%r10)
5660 + movl TI_sysenter_return(%r10), %r10d
5661 CFI_REGISTER rip,r10
5662 pushq_cfi $__USER32_CS
5663 /*CFI_REL_OFFSET cs,0*/
5664 @@ -146,6 +171,12 @@ ENTRY(ia32_sysenter_target)
5665 SAVE_ARGS 0,0,1
5666 /* no need to do an access_ok check here because rbp has been
5667 32bit zero extended */
5668 +
5669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5670 + mov $PAX_USER_SHADOW_BASE,%r10
5671 + add %r10,%rbp
5672 +#endif
5673 +
5674 1: movl (%rbp),%ebp
5675 .section __ex_table,"a"
5676 .quad 1b,ia32_badarg
5677 @@ -168,6 +199,8 @@ sysenter_dispatch:
5678 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5679 jnz sysexit_audit
5680 sysexit_from_sys_call:
5681 + pax_exit_kernel_user
5682 + pax_erase_kstack
5683 andl $~TS_COMPAT,TI_status(%r10)
5684 /* clear IF, that popfq doesn't enable interrupts early */
5685 andl $~0x200,EFLAGS-R11(%rsp)
5686 @@ -194,6 +227,9 @@ sysexit_from_sys_call:
5687 movl %eax,%esi /* 2nd arg: syscall number */
5688 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5689 call audit_syscall_entry
5690 +
5691 + pax_erase_kstack
5692 +
5693 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5694 cmpq $(IA32_NR_syscalls-1),%rax
5695 ja ia32_badsys
5696 @@ -246,6 +282,9 @@ sysenter_tracesys:
5697 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5698 movq %rsp,%rdi /* &pt_regs -> arg1 */
5699 call syscall_trace_enter
5700 +
5701 + pax_erase_kstack
5702 +
5703 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5704 RESTORE_REST
5705 cmpq $(IA32_NR_syscalls-1),%rax
5706 @@ -277,19 +316,24 @@ ENDPROC(ia32_sysenter_target)
5707 ENTRY(ia32_cstar_target)
5708 CFI_STARTPROC32 simple
5709 CFI_SIGNAL_FRAME
5710 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5711 + CFI_DEF_CFA rsp,0
5712 CFI_REGISTER rip,rcx
5713 /*CFI_REGISTER rflags,r11*/
5714 SWAPGS_UNSAFE_STACK
5715 movl %esp,%r8d
5716 CFI_REGISTER rsp,r8
5717 movq PER_CPU_VAR(kernel_stack),%rsp
5718 +
5719 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5720 + pax_enter_kernel_user
5721 +#endif
5722 +
5723 /*
5724 * No need to follow this irqs on/off section: the syscall
5725 * disabled irqs and here we enable it straight after entry:
5726 */
5727 ENABLE_INTERRUPTS(CLBR_NONE)
5728 - SAVE_ARGS 8,1,1
5729 + SAVE_ARGS 8*6,1,1
5730 movl %eax,%eax /* zero extension */
5731 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5732 movq %rcx,RIP-ARGOFFSET(%rsp)
5733 @@ -305,6 +349,12 @@ ENTRY(ia32_cstar_target)
5734 /* no need to do an access_ok check here because r8 has been
5735 32bit zero extended */
5736 /* hardware stack frame is complete now */
5737 +
5738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5739 + mov $PAX_USER_SHADOW_BASE,%r10
5740 + add %r10,%r8
5741 +#endif
5742 +
5743 1: movl (%r8),%r9d
5744 .section __ex_table,"a"
5745 .quad 1b,ia32_badarg
5746 @@ -327,6 +377,8 @@ cstar_dispatch:
5747 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5748 jnz sysretl_audit
5749 sysretl_from_sys_call:
5750 + pax_exit_kernel_user
5751 + pax_erase_kstack
5752 andl $~TS_COMPAT,TI_status(%r10)
5753 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5754 movl RIP-ARGOFFSET(%rsp),%ecx
5755 @@ -364,6 +416,9 @@ cstar_tracesys:
5756 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5757 movq %rsp,%rdi /* &pt_regs -> arg1 */
5758 call syscall_trace_enter
5759 +
5760 + pax_erase_kstack
5761 +
5762 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5763 RESTORE_REST
5764 xchgl %ebp,%r9d
5765 @@ -409,6 +464,7 @@ ENTRY(ia32_syscall)
5766 CFI_REL_OFFSET rip,RIP-RIP
5767 PARAVIRT_ADJUST_EXCEPTION_FRAME
5768 SWAPGS
5769 + pax_enter_kernel_user
5770 /*
5771 * No need to follow this irqs on/off section: the syscall
5772 * disabled irqs and here we enable it straight after entry:
5773 @@ -441,6 +497,9 @@ ia32_tracesys:
5774 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5775 movq %rsp,%rdi /* &pt_regs -> arg1 */
5776 call syscall_trace_enter
5777 +
5778 + pax_erase_kstack
5779 +
5780 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5781 RESTORE_REST
5782 cmpq $(IA32_NR_syscalls-1),%rax
5783 diff -urNp linux-3.0.4/arch/x86/ia32/ia32_signal.c linux-3.0.4/arch/x86/ia32/ia32_signal.c
5784 --- linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5785 +++ linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-08-23 21:47:55.000000000 -0400
5786 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5787 sp -= frame_size;
5788 /* Align the stack pointer according to the i386 ABI,
5789 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5790 - sp = ((sp + 4) & -16ul) - 4;
5791 + sp = ((sp - 12) & -16ul) - 4;
5792 return (void __user *) sp;
5793 }
5794
5795 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5796 * These are actually not used anymore, but left because some
5797 * gdb versions depend on them as a marker.
5798 */
5799 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5800 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5801 } put_user_catch(err);
5802
5803 if (err)
5804 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5805 0xb8,
5806 __NR_ia32_rt_sigreturn,
5807 0x80cd,
5808 - 0,
5809 + 0
5810 };
5811
5812 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5813 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5814
5815 if (ka->sa.sa_flags & SA_RESTORER)
5816 restorer = ka->sa.sa_restorer;
5817 + else if (current->mm->context.vdso)
5818 + /* Return stub is in 32bit vsyscall page */
5819 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5820 else
5821 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5822 - rt_sigreturn);
5823 + restorer = &frame->retcode;
5824 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5825
5826 /*
5827 * Not actually used anymore, but left because some gdb
5828 * versions need it.
5829 */
5830 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5831 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5832 } put_user_catch(err);
5833
5834 if (err)
5835 diff -urNp linux-3.0.4/arch/x86/include/asm/alternative.h linux-3.0.4/arch/x86/include/asm/alternative.h
5836 --- linux-3.0.4/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
5837 +++ linux-3.0.4/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
5838 @@ -93,7 +93,7 @@ static inline int alternatives_text_rese
5839 ".section .discard,\"aw\",@progbits\n" \
5840 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5841 ".previous\n" \
5842 - ".section .altinstr_replacement, \"ax\"\n" \
5843 + ".section .altinstr_replacement, \"a\"\n" \
5844 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5845 ".previous"
5846
5847 diff -urNp linux-3.0.4/arch/x86/include/asm/apic.h linux-3.0.4/arch/x86/include/asm/apic.h
5848 --- linux-3.0.4/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
5849 +++ linux-3.0.4/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
5850 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5851
5852 #ifdef CONFIG_X86_LOCAL_APIC
5853
5854 -extern unsigned int apic_verbosity;
5855 +extern int apic_verbosity;
5856 extern int local_apic_timer_c2_ok;
5857
5858 extern int disable_apic;
5859 diff -urNp linux-3.0.4/arch/x86/include/asm/apm.h linux-3.0.4/arch/x86/include/asm/apm.h
5860 --- linux-3.0.4/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
5861 +++ linux-3.0.4/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
5862 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5863 __asm__ __volatile__(APM_DO_ZERO_SEGS
5864 "pushl %%edi\n\t"
5865 "pushl %%ebp\n\t"
5866 - "lcall *%%cs:apm_bios_entry\n\t"
5867 + "lcall *%%ss:apm_bios_entry\n\t"
5868 "setc %%al\n\t"
5869 "popl %%ebp\n\t"
5870 "popl %%edi\n\t"
5871 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5872 __asm__ __volatile__(APM_DO_ZERO_SEGS
5873 "pushl %%edi\n\t"
5874 "pushl %%ebp\n\t"
5875 - "lcall *%%cs:apm_bios_entry\n\t"
5876 + "lcall *%%ss:apm_bios_entry\n\t"
5877 "setc %%bl\n\t"
5878 "popl %%ebp\n\t"
5879 "popl %%edi\n\t"
5880 diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_32.h linux-3.0.4/arch/x86/include/asm/atomic64_32.h
5881 --- linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
5882 +++ linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
5883 @@ -12,6 +12,14 @@ typedef struct {
5884 u64 __aligned(8) counter;
5885 } atomic64_t;
5886
5887 +#ifdef CONFIG_PAX_REFCOUNT
5888 +typedef struct {
5889 + u64 __aligned(8) counter;
5890 +} atomic64_unchecked_t;
5891 +#else
5892 +typedef atomic64_t atomic64_unchecked_t;
5893 +#endif
5894 +
5895 #define ATOMIC64_INIT(val) { (val) }
5896
5897 #ifdef CONFIG_X86_CMPXCHG64
5898 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5899 }
5900
5901 /**
5902 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5903 + * @p: pointer to type atomic64_unchecked_t
5904 + * @o: expected value
5905 + * @n: new value
5906 + *
5907 + * Atomically sets @v to @n if it was equal to @o and returns
5908 + * the old value.
5909 + */
5910 +
5911 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5912 +{
5913 + return cmpxchg64(&v->counter, o, n);
5914 +}
5915 +
5916 +/**
5917 * atomic64_xchg - xchg atomic64 variable
5918 * @v: pointer to type atomic64_t
5919 * @n: value to assign
5920 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5921 }
5922
5923 /**
5924 + * atomic64_set_unchecked - set atomic64 variable
5925 + * @v: pointer to type atomic64_unchecked_t
5926 + * @n: value to assign
5927 + *
5928 + * Atomically sets the value of @v to @n.
5929 + */
5930 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5931 +{
5932 + unsigned high = (unsigned)(i >> 32);
5933 + unsigned low = (unsigned)i;
5934 + asm volatile(ATOMIC64_ALTERNATIVE(set)
5935 + : "+b" (low), "+c" (high)
5936 + : "S" (v)
5937 + : "eax", "edx", "memory"
5938 + );
5939 +}
5940 +
5941 +/**
5942 * atomic64_read - read atomic64 variable
5943 * @v: pointer to type atomic64_t
5944 *
5945 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5946 }
5947
5948 /**
5949 + * atomic64_read_unchecked - read atomic64 variable
5950 + * @v: pointer to type atomic64_unchecked_t
5951 + *
5952 + * Atomically reads the value of @v and returns it.
5953 + */
5954 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5955 +{
5956 + long long r;
5957 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5958 + : "=A" (r), "+c" (v)
5959 + : : "memory"
5960 + );
5961 + return r;
5962 + }
5963 +
5964 +/**
5965 * atomic64_add_return - add and return
5966 * @i: integer value to add
5967 * @v: pointer to type atomic64_t
5968 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5969 return i;
5970 }
5971
5972 +/**
5973 + * atomic64_add_return_unchecked - add and return
5974 + * @i: integer value to add
5975 + * @v: pointer to type atomic64_unchecked_t
5976 + *
5977 + * Atomically adds @i to @v and returns @i + *@v
5978 + */
5979 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
5980 +{
5981 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
5982 + : "+A" (i), "+c" (v)
5983 + : : "memory"
5984 + );
5985 + return i;
5986 +}
5987 +
5988 /*
5989 * Other variants with different arithmetic operators:
5990 */
5991 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
5992 return a;
5993 }
5994
5995 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5996 +{
5997 + long long a;
5998 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
5999 + : "=A" (a)
6000 + : "S" (v)
6001 + : "memory", "ecx"
6002 + );
6003 + return a;
6004 +}
6005 +
6006 static inline long long atomic64_dec_return(atomic64_t *v)
6007 {
6008 long long a;
6009 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6010 }
6011
6012 /**
6013 + * atomic64_add_unchecked - add integer to atomic64 variable
6014 + * @i: integer value to add
6015 + * @v: pointer to type atomic64_unchecked_t
6016 + *
6017 + * Atomically adds @i to @v.
6018 + */
6019 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6020 +{
6021 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6022 + : "+A" (i), "+c" (v)
6023 + : : "memory"
6024 + );
6025 + return i;
6026 +}
6027 +
6028 +/**
6029 * atomic64_sub - subtract the atomic64 variable
6030 * @i: integer value to subtract
6031 * @v: pointer to type atomic64_t
6032 diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_64.h linux-3.0.4/arch/x86/include/asm/atomic64_64.h
6033 --- linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6034 +++ linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6035 @@ -18,7 +18,19 @@
6036 */
6037 static inline long atomic64_read(const atomic64_t *v)
6038 {
6039 - return (*(volatile long *)&(v)->counter);
6040 + return (*(volatile const long *)&(v)->counter);
6041 +}
6042 +
6043 +/**
6044 + * atomic64_read_unchecked - read atomic64 variable
6045 + * @v: pointer of type atomic64_unchecked_t
6046 + *
6047 + * Atomically reads the value of @v.
6048 + * Doesn't imply a read memory barrier.
6049 + */
6050 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6051 +{
6052 + return (*(volatile const long *)&(v)->counter);
6053 }
6054
6055 /**
6056 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6057 }
6058
6059 /**
6060 + * atomic64_set_unchecked - set atomic64 variable
6061 + * @v: pointer to type atomic64_unchecked_t
6062 + * @i: required value
6063 + *
6064 + * Atomically sets the value of @v to @i.
6065 + */
6066 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6067 +{
6068 + v->counter = i;
6069 +}
6070 +
6071 +/**
6072 * atomic64_add - add integer to atomic64 variable
6073 * @i: integer value to add
6074 * @v: pointer to type atomic64_t
6075 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6076 */
6077 static inline void atomic64_add(long i, atomic64_t *v)
6078 {
6079 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6080 +
6081 +#ifdef CONFIG_PAX_REFCOUNT
6082 + "jno 0f\n"
6083 + LOCK_PREFIX "subq %1,%0\n"
6084 + "int $4\n0:\n"
6085 + _ASM_EXTABLE(0b, 0b)
6086 +#endif
6087 +
6088 + : "=m" (v->counter)
6089 + : "er" (i), "m" (v->counter));
6090 +}
6091 +
6092 +/**
6093 + * atomic64_add_unchecked - add integer to atomic64 variable
6094 + * @i: integer value to add
6095 + * @v: pointer to type atomic64_unchecked_t
6096 + *
6097 + * Atomically adds @i to @v.
6098 + */
6099 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6100 +{
6101 asm volatile(LOCK_PREFIX "addq %1,%0"
6102 : "=m" (v->counter)
6103 : "er" (i), "m" (v->counter));
6104 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6105 */
6106 static inline void atomic64_sub(long i, atomic64_t *v)
6107 {
6108 - asm volatile(LOCK_PREFIX "subq %1,%0"
6109 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6110 +
6111 +#ifdef CONFIG_PAX_REFCOUNT
6112 + "jno 0f\n"
6113 + LOCK_PREFIX "addq %1,%0\n"
6114 + "int $4\n0:\n"
6115 + _ASM_EXTABLE(0b, 0b)
6116 +#endif
6117 +
6118 + : "=m" (v->counter)
6119 + : "er" (i), "m" (v->counter));
6120 +}
6121 +
6122 +/**
6123 + * atomic64_sub_unchecked - subtract the atomic64 variable
6124 + * @i: integer value to subtract
6125 + * @v: pointer to type atomic64_unchecked_t
6126 + *
6127 + * Atomically subtracts @i from @v.
6128 + */
6129 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6130 +{
6131 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6132 : "=m" (v->counter)
6133 : "er" (i), "m" (v->counter));
6134 }
6135 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6136 {
6137 unsigned char c;
6138
6139 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6140 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6141 +
6142 +#ifdef CONFIG_PAX_REFCOUNT
6143 + "jno 0f\n"
6144 + LOCK_PREFIX "addq %2,%0\n"
6145 + "int $4\n0:\n"
6146 + _ASM_EXTABLE(0b, 0b)
6147 +#endif
6148 +
6149 + "sete %1\n"
6150 : "=m" (v->counter), "=qm" (c)
6151 : "er" (i), "m" (v->counter) : "memory");
6152 return c;
6153 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6154 */
6155 static inline void atomic64_inc(atomic64_t *v)
6156 {
6157 + asm volatile(LOCK_PREFIX "incq %0\n"
6158 +
6159 +#ifdef CONFIG_PAX_REFCOUNT
6160 + "jno 0f\n"
6161 + LOCK_PREFIX "decq %0\n"
6162 + "int $4\n0:\n"
6163 + _ASM_EXTABLE(0b, 0b)
6164 +#endif
6165 +
6166 + : "=m" (v->counter)
6167 + : "m" (v->counter));
6168 +}
6169 +
6170 +/**
6171 + * atomic64_inc_unchecked - increment atomic64 variable
6172 + * @v: pointer to type atomic64_unchecked_t
6173 + *
6174 + * Atomically increments @v by 1.
6175 + */
6176 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6177 +{
6178 asm volatile(LOCK_PREFIX "incq %0"
6179 : "=m" (v->counter)
6180 : "m" (v->counter));
6181 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6182 */
6183 static inline void atomic64_dec(atomic64_t *v)
6184 {
6185 - asm volatile(LOCK_PREFIX "decq %0"
6186 + asm volatile(LOCK_PREFIX "decq %0\n"
6187 +
6188 +#ifdef CONFIG_PAX_REFCOUNT
6189 + "jno 0f\n"
6190 + LOCK_PREFIX "incq %0\n"
6191 + "int $4\n0:\n"
6192 + _ASM_EXTABLE(0b, 0b)
6193 +#endif
6194 +
6195 + : "=m" (v->counter)
6196 + : "m" (v->counter));
6197 +}
6198 +
6199 +/**
6200 + * atomic64_dec_unchecked - decrement atomic64 variable
6201 + * @v: pointer to type atomic64_t
6202 + *
6203 + * Atomically decrements @v by 1.
6204 + */
6205 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6206 +{
6207 + asm volatile(LOCK_PREFIX "decq %0\n"
6208 : "=m" (v->counter)
6209 : "m" (v->counter));
6210 }
6211 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6212 {
6213 unsigned char c;
6214
6215 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6216 + asm volatile(LOCK_PREFIX "decq %0\n"
6217 +
6218 +#ifdef CONFIG_PAX_REFCOUNT
6219 + "jno 0f\n"
6220 + LOCK_PREFIX "incq %0\n"
6221 + "int $4\n0:\n"
6222 + _ASM_EXTABLE(0b, 0b)
6223 +#endif
6224 +
6225 + "sete %1\n"
6226 : "=m" (v->counter), "=qm" (c)
6227 : "m" (v->counter) : "memory");
6228 return c != 0;
6229 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6230 {
6231 unsigned char c;
6232
6233 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6234 + asm volatile(LOCK_PREFIX "incq %0\n"
6235 +
6236 +#ifdef CONFIG_PAX_REFCOUNT
6237 + "jno 0f\n"
6238 + LOCK_PREFIX "decq %0\n"
6239 + "int $4\n0:\n"
6240 + _ASM_EXTABLE(0b, 0b)
6241 +#endif
6242 +
6243 + "sete %1\n"
6244 : "=m" (v->counter), "=qm" (c)
6245 : "m" (v->counter) : "memory");
6246 return c != 0;
6247 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6248 {
6249 unsigned char c;
6250
6251 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6252 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6253 +
6254 +#ifdef CONFIG_PAX_REFCOUNT
6255 + "jno 0f\n"
6256 + LOCK_PREFIX "subq %2,%0\n"
6257 + "int $4\n0:\n"
6258 + _ASM_EXTABLE(0b, 0b)
6259 +#endif
6260 +
6261 + "sets %1\n"
6262 : "=m" (v->counter), "=qm" (c)
6263 : "er" (i), "m" (v->counter) : "memory");
6264 return c;
6265 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6266 static inline long atomic64_add_return(long i, atomic64_t *v)
6267 {
6268 long __i = i;
6269 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6270 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6271 +
6272 +#ifdef CONFIG_PAX_REFCOUNT
6273 + "jno 0f\n"
6274 + "movq %0, %1\n"
6275 + "int $4\n0:\n"
6276 + _ASM_EXTABLE(0b, 0b)
6277 +#endif
6278 +
6279 + : "+r" (i), "+m" (v->counter)
6280 + : : "memory");
6281 + return i + __i;
6282 +}
6283 +
6284 +/**
6285 + * atomic64_add_return_unchecked - add and return
6286 + * @i: integer value to add
6287 + * @v: pointer to type atomic64_unchecked_t
6288 + *
6289 + * Atomically adds @i to @v and returns @i + @v
6290 + */
6291 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6292 +{
6293 + long __i = i;
6294 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6295 : "+r" (i), "+m" (v->counter)
6296 : : "memory");
6297 return i + __i;
6298 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6299 }
6300
6301 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6302 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6303 +{
6304 + return atomic64_add_return_unchecked(1, v);
6305 +}
6306 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6307
6308 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6309 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6310 return cmpxchg(&v->counter, old, new);
6311 }
6312
6313 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6314 +{
6315 + return cmpxchg(&v->counter, old, new);
6316 +}
6317 +
6318 static inline long atomic64_xchg(atomic64_t *v, long new)
6319 {
6320 return xchg(&v->counter, new);
6321 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6322 */
6323 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6324 {
6325 - long c, old;
6326 + long c, old, new;
6327 c = atomic64_read(v);
6328 for (;;) {
6329 - if (unlikely(c == (u)))
6330 + if (unlikely(c == u))
6331 break;
6332 - old = atomic64_cmpxchg((v), c, c + (a));
6333 +
6334 + asm volatile("add %2,%0\n"
6335 +
6336 +#ifdef CONFIG_PAX_REFCOUNT
6337 + "jno 0f\n"
6338 + "sub %2,%0\n"
6339 + "int $4\n0:\n"
6340 + _ASM_EXTABLE(0b, 0b)
6341 +#endif
6342 +
6343 + : "=r" (new)
6344 + : "0" (c), "ir" (a));
6345 +
6346 + old = atomic64_cmpxchg(v, c, new);
6347 if (likely(old == c))
6348 break;
6349 c = old;
6350 }
6351 - return c != (u);
6352 + return c != u;
6353 }
6354
6355 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6356 diff -urNp linux-3.0.4/arch/x86/include/asm/atomic.h linux-3.0.4/arch/x86/include/asm/atomic.h
6357 --- linux-3.0.4/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6358 +++ linux-3.0.4/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6359 @@ -22,7 +22,18 @@
6360 */
6361 static inline int atomic_read(const atomic_t *v)
6362 {
6363 - return (*(volatile int *)&(v)->counter);
6364 + return (*(volatile const int *)&(v)->counter);
6365 +}
6366 +
6367 +/**
6368 + * atomic_read_unchecked - read atomic variable
6369 + * @v: pointer of type atomic_unchecked_t
6370 + *
6371 + * Atomically reads the value of @v.
6372 + */
6373 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6374 +{
6375 + return (*(volatile const int *)&(v)->counter);
6376 }
6377
6378 /**
6379 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6380 }
6381
6382 /**
6383 + * atomic_set_unchecked - set atomic variable
6384 + * @v: pointer of type atomic_unchecked_t
6385 + * @i: required value
6386 + *
6387 + * Atomically sets the value of @v to @i.
6388 + */
6389 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6390 +{
6391 + v->counter = i;
6392 +}
6393 +
6394 +/**
6395 * atomic_add - add integer to atomic variable
6396 * @i: integer value to add
6397 * @v: pointer of type atomic_t
6398 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6399 */
6400 static inline void atomic_add(int i, atomic_t *v)
6401 {
6402 - asm volatile(LOCK_PREFIX "addl %1,%0"
6403 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6404 +
6405 +#ifdef CONFIG_PAX_REFCOUNT
6406 + "jno 0f\n"
6407 + LOCK_PREFIX "subl %1,%0\n"
6408 + "int $4\n0:\n"
6409 + _ASM_EXTABLE(0b, 0b)
6410 +#endif
6411 +
6412 + : "+m" (v->counter)
6413 + : "ir" (i));
6414 +}
6415 +
6416 +/**
6417 + * atomic_add_unchecked - add integer to atomic variable
6418 + * @i: integer value to add
6419 + * @v: pointer of type atomic_unchecked_t
6420 + *
6421 + * Atomically adds @i to @v.
6422 + */
6423 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6424 +{
6425 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6426 : "+m" (v->counter)
6427 : "ir" (i));
6428 }
6429 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6430 */
6431 static inline void atomic_sub(int i, atomic_t *v)
6432 {
6433 - asm volatile(LOCK_PREFIX "subl %1,%0"
6434 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6435 +
6436 +#ifdef CONFIG_PAX_REFCOUNT
6437 + "jno 0f\n"
6438 + LOCK_PREFIX "addl %1,%0\n"
6439 + "int $4\n0:\n"
6440 + _ASM_EXTABLE(0b, 0b)
6441 +#endif
6442 +
6443 + : "+m" (v->counter)
6444 + : "ir" (i));
6445 +}
6446 +
6447 +/**
6448 + * atomic_sub_unchecked - subtract integer from atomic variable
6449 + * @i: integer value to subtract
6450 + * @v: pointer of type atomic_unchecked_t
6451 + *
6452 + * Atomically subtracts @i from @v.
6453 + */
6454 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6455 +{
6456 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6457 : "+m" (v->counter)
6458 : "ir" (i));
6459 }
6460 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6461 {
6462 unsigned char c;
6463
6464 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6465 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6466 +
6467 +#ifdef CONFIG_PAX_REFCOUNT
6468 + "jno 0f\n"
6469 + LOCK_PREFIX "addl %2,%0\n"
6470 + "int $4\n0:\n"
6471 + _ASM_EXTABLE(0b, 0b)
6472 +#endif
6473 +
6474 + "sete %1\n"
6475 : "+m" (v->counter), "=qm" (c)
6476 : "ir" (i) : "memory");
6477 return c;
6478 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6479 */
6480 static inline void atomic_inc(atomic_t *v)
6481 {
6482 - asm volatile(LOCK_PREFIX "incl %0"
6483 + asm volatile(LOCK_PREFIX "incl %0\n"
6484 +
6485 +#ifdef CONFIG_PAX_REFCOUNT
6486 + "jno 0f\n"
6487 + LOCK_PREFIX "decl %0\n"
6488 + "int $4\n0:\n"
6489 + _ASM_EXTABLE(0b, 0b)
6490 +#endif
6491 +
6492 + : "+m" (v->counter));
6493 +}
6494 +
6495 +/**
6496 + * atomic_inc_unchecked - increment atomic variable
6497 + * @v: pointer of type atomic_unchecked_t
6498 + *
6499 + * Atomically increments @v by 1.
6500 + */
6501 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6502 +{
6503 + asm volatile(LOCK_PREFIX "incl %0\n"
6504 : "+m" (v->counter));
6505 }
6506
6507 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6508 */
6509 static inline void atomic_dec(atomic_t *v)
6510 {
6511 - asm volatile(LOCK_PREFIX "decl %0"
6512 + asm volatile(LOCK_PREFIX "decl %0\n"
6513 +
6514 +#ifdef CONFIG_PAX_REFCOUNT
6515 + "jno 0f\n"
6516 + LOCK_PREFIX "incl %0\n"
6517 + "int $4\n0:\n"
6518 + _ASM_EXTABLE(0b, 0b)
6519 +#endif
6520 +
6521 + : "+m" (v->counter));
6522 +}
6523 +
6524 +/**
6525 + * atomic_dec_unchecked - decrement atomic variable
6526 + * @v: pointer of type atomic_unchecked_t
6527 + *
6528 + * Atomically decrements @v by 1.
6529 + */
6530 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6531 +{
6532 + asm volatile(LOCK_PREFIX "decl %0\n"
6533 : "+m" (v->counter));
6534 }
6535
6536 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6537 {
6538 unsigned char c;
6539
6540 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6541 + asm volatile(LOCK_PREFIX "decl %0\n"
6542 +
6543 +#ifdef CONFIG_PAX_REFCOUNT
6544 + "jno 0f\n"
6545 + LOCK_PREFIX "incl %0\n"
6546 + "int $4\n0:\n"
6547 + _ASM_EXTABLE(0b, 0b)
6548 +#endif
6549 +
6550 + "sete %1\n"
6551 : "+m" (v->counter), "=qm" (c)
6552 : : "memory");
6553 return c != 0;
6554 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6555 {
6556 unsigned char c;
6557
6558 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6559 + asm volatile(LOCK_PREFIX "incl %0\n"
6560 +
6561 +#ifdef CONFIG_PAX_REFCOUNT
6562 + "jno 0f\n"
6563 + LOCK_PREFIX "decl %0\n"
6564 + "int $4\n0:\n"
6565 + _ASM_EXTABLE(0b, 0b)
6566 +#endif
6567 +
6568 + "sete %1\n"
6569 + : "+m" (v->counter), "=qm" (c)
6570 + : : "memory");
6571 + return c != 0;
6572 +}
6573 +
6574 +/**
6575 + * atomic_inc_and_test_unchecked - increment and test
6576 + * @v: pointer of type atomic_unchecked_t
6577 + *
6578 + * Atomically increments @v by 1
6579 + * and returns true if the result is zero, or false for all
6580 + * other cases.
6581 + */
6582 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6583 +{
6584 + unsigned char c;
6585 +
6586 + asm volatile(LOCK_PREFIX "incl %0\n"
6587 + "sete %1\n"
6588 : "+m" (v->counter), "=qm" (c)
6589 : : "memory");
6590 return c != 0;
6591 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6592 {
6593 unsigned char c;
6594
6595 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6596 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6597 +
6598 +#ifdef CONFIG_PAX_REFCOUNT
6599 + "jno 0f\n"
6600 + LOCK_PREFIX "subl %2,%0\n"
6601 + "int $4\n0:\n"
6602 + _ASM_EXTABLE(0b, 0b)
6603 +#endif
6604 +
6605 + "sets %1\n"
6606 : "+m" (v->counter), "=qm" (c)
6607 : "ir" (i) : "memory");
6608 return c;
6609 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6610 #endif
6611 /* Modern 486+ processor */
6612 __i = i;
6613 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6614 +
6615 +#ifdef CONFIG_PAX_REFCOUNT
6616 + "jno 0f\n"
6617 + "movl %0, %1\n"
6618 + "int $4\n0:\n"
6619 + _ASM_EXTABLE(0b, 0b)
6620 +#endif
6621 +
6622 + : "+r" (i), "+m" (v->counter)
6623 + : : "memory");
6624 + return i + __i;
6625 +
6626 +#ifdef CONFIG_M386
6627 +no_xadd: /* Legacy 386 processor */
6628 + local_irq_save(flags);
6629 + __i = atomic_read(v);
6630 + atomic_set(v, i + __i);
6631 + local_irq_restore(flags);
6632 + return i + __i;
6633 +#endif
6634 +}
6635 +
6636 +/**
6637 + * atomic_add_return_unchecked - add integer and return
6638 + * @v: pointer of type atomic_unchecked_t
6639 + * @i: integer value to add
6640 + *
6641 + * Atomically adds @i to @v and returns @i + @v
6642 + */
6643 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6644 +{
6645 + int __i;
6646 +#ifdef CONFIG_M386
6647 + unsigned long flags;
6648 + if (unlikely(boot_cpu_data.x86 <= 3))
6649 + goto no_xadd;
6650 +#endif
6651 + /* Modern 486+ processor */
6652 + __i = i;
6653 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6654 : "+r" (i), "+m" (v->counter)
6655 : : "memory");
6656 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6657 }
6658
6659 #define atomic_inc_return(v) (atomic_add_return(1, v))
6660 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6661 +{
6662 + return atomic_add_return_unchecked(1, v);
6663 +}
6664 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6665
6666 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6667 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6668 return cmpxchg(&v->counter, old, new);
6669 }
6670
6671 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6672 +{
6673 + return cmpxchg(&v->counter, old, new);
6674 +}
6675 +
6676 static inline int atomic_xchg(atomic_t *v, int new)
6677 {
6678 return xchg(&v->counter, new);
6679 }
6680
6681 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6682 +{
6683 + return xchg(&v->counter, new);
6684 +}
6685 +
6686 /**
6687 * atomic_add_unless - add unless the number is already a given value
6688 * @v: pointer of type atomic_t
6689 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6690 */
6691 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6692 {
6693 - int c, old;
6694 + int c, old, new;
6695 c = atomic_read(v);
6696 for (;;) {
6697 - if (unlikely(c == (u)))
6698 + if (unlikely(c == u))
6699 break;
6700 - old = atomic_cmpxchg((v), c, c + (a));
6701 +
6702 + asm volatile("addl %2,%0\n"
6703 +
6704 +#ifdef CONFIG_PAX_REFCOUNT
6705 + "jno 0f\n"
6706 + "subl %2,%0\n"
6707 + "int $4\n0:\n"
6708 + _ASM_EXTABLE(0b, 0b)
6709 +#endif
6710 +
6711 + : "=r" (new)
6712 + : "0" (c), "ir" (a));
6713 +
6714 + old = atomic_cmpxchg(v, c, new);
6715 if (likely(old == c))
6716 break;
6717 c = old;
6718 }
6719 - return c != (u);
6720 + return c != u;
6721 }
6722
6723 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6724
6725 +/**
6726 + * atomic_inc_not_zero_hint - increment if not null
6727 + * @v: pointer of type atomic_t
6728 + * @hint: probable value of the atomic before the increment
6729 + *
6730 + * This version of atomic_inc_not_zero() gives a hint of probable
6731 + * value of the atomic. This helps processor to not read the memory
6732 + * before doing the atomic read/modify/write cycle, lowering
6733 + * number of bus transactions on some arches.
6734 + *
6735 + * Returns: 0 if increment was not done, 1 otherwise.
6736 + */
6737 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6738 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6739 +{
6740 + int val, c = hint, new;
6741 +
6742 + /* sanity test, should be removed by compiler if hint is a constant */
6743 + if (!hint)
6744 + return atomic_inc_not_zero(v);
6745 +
6746 + do {
6747 + asm volatile("incl %0\n"
6748 +
6749 +#ifdef CONFIG_PAX_REFCOUNT
6750 + "jno 0f\n"
6751 + "decl %0\n"
6752 + "int $4\n0:\n"
6753 + _ASM_EXTABLE(0b, 0b)
6754 +#endif
6755 +
6756 + : "=r" (new)
6757 + : "0" (c));
6758 +
6759 + val = atomic_cmpxchg(v, c, new);
6760 + if (val == c)
6761 + return 1;
6762 + c = val;
6763 + } while (c);
6764 +
6765 + return 0;
6766 +}
6767 +
6768 /*
6769 * atomic_dec_if_positive - decrement by 1 if old value positive
6770 * @v: pointer of type atomic_t
6771 diff -urNp linux-3.0.4/arch/x86/include/asm/bitops.h linux-3.0.4/arch/x86/include/asm/bitops.h
6772 --- linux-3.0.4/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6773 +++ linux-3.0.4/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6774 @@ -38,7 +38,7 @@
6775 * a mask operation on a byte.
6776 */
6777 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6778 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6779 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6780 #define CONST_MASK(nr) (1 << ((nr) & 7))
6781
6782 /**
6783 diff -urNp linux-3.0.4/arch/x86/include/asm/boot.h linux-3.0.4/arch/x86/include/asm/boot.h
6784 --- linux-3.0.4/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6785 +++ linux-3.0.4/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6786 @@ -11,10 +11,15 @@
6787 #include <asm/pgtable_types.h>
6788
6789 /* Physical address where kernel should be loaded. */
6790 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6791 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6792 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6793 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6794
6795 +#ifndef __ASSEMBLY__
6796 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6797 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6798 +#endif
6799 +
6800 /* Minimum kernel alignment, as a power of two */
6801 #ifdef CONFIG_X86_64
6802 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6803 diff -urNp linux-3.0.4/arch/x86/include/asm/cacheflush.h linux-3.0.4/arch/x86/include/asm/cacheflush.h
6804 --- linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
6805 +++ linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
6806 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6807 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6808
6809 if (pg_flags == _PGMT_DEFAULT)
6810 - return -1;
6811 + return ~0UL;
6812 else if (pg_flags == _PGMT_WC)
6813 return _PAGE_CACHE_WC;
6814 else if (pg_flags == _PGMT_UC_MINUS)
6815 diff -urNp linux-3.0.4/arch/x86/include/asm/cache.h linux-3.0.4/arch/x86/include/asm/cache.h
6816 --- linux-3.0.4/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
6817 +++ linux-3.0.4/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
6818 @@ -5,12 +5,13 @@
6819
6820 /* L1 cache line size */
6821 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6822 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6823 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6824
6825 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6826 +#define __read_only __attribute__((__section__(".data..read_only")))
6827
6828 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6829 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6830 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6831
6832 #ifdef CONFIG_X86_VSMP
6833 #ifdef CONFIG_SMP
6834 diff -urNp linux-3.0.4/arch/x86/include/asm/checksum_32.h linux-3.0.4/arch/x86/include/asm/checksum_32.h
6835 --- linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
6836 +++ linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
6837 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6838 int len, __wsum sum,
6839 int *src_err_ptr, int *dst_err_ptr);
6840
6841 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6842 + int len, __wsum sum,
6843 + int *src_err_ptr, int *dst_err_ptr);
6844 +
6845 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6846 + int len, __wsum sum,
6847 + int *src_err_ptr, int *dst_err_ptr);
6848 +
6849 /*
6850 * Note: when you get a NULL pointer exception here this means someone
6851 * passed in an incorrect kernel address to one of these functions.
6852 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6853 int *err_ptr)
6854 {
6855 might_sleep();
6856 - return csum_partial_copy_generic((__force void *)src, dst,
6857 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
6858 len, sum, err_ptr, NULL);
6859 }
6860
6861 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6862 {
6863 might_sleep();
6864 if (access_ok(VERIFY_WRITE, dst, len))
6865 - return csum_partial_copy_generic(src, (__force void *)dst,
6866 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6867 len, sum, NULL, err_ptr);
6868
6869 if (len)
6870 diff -urNp linux-3.0.4/arch/x86/include/asm/cpufeature.h linux-3.0.4/arch/x86/include/asm/cpufeature.h
6871 --- linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
6872 +++ linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
6873 @@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
6874 ".section .discard,\"aw\",@progbits\n"
6875 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6876 ".previous\n"
6877 - ".section .altinstr_replacement,\"ax\"\n"
6878 + ".section .altinstr_replacement,\"a\"\n"
6879 "3: movb $1,%0\n"
6880 "4:\n"
6881 ".previous\n"
6882 diff -urNp linux-3.0.4/arch/x86/include/asm/desc_defs.h linux-3.0.4/arch/x86/include/asm/desc_defs.h
6883 --- linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
6884 +++ linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
6885 @@ -31,6 +31,12 @@ struct desc_struct {
6886 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6887 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6888 };
6889 + struct {
6890 + u16 offset_low;
6891 + u16 seg;
6892 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6893 + unsigned offset_high: 16;
6894 + } gate;
6895 };
6896 } __attribute__((packed));
6897
6898 diff -urNp linux-3.0.4/arch/x86/include/asm/desc.h linux-3.0.4/arch/x86/include/asm/desc.h
6899 --- linux-3.0.4/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
6900 +++ linux-3.0.4/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
6901 @@ -4,6 +4,7 @@
6902 #include <asm/desc_defs.h>
6903 #include <asm/ldt.h>
6904 #include <asm/mmu.h>
6905 +#include <asm/pgtable.h>
6906
6907 #include <linux/smp.h>
6908
6909 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
6910
6911 desc->type = (info->read_exec_only ^ 1) << 1;
6912 desc->type |= info->contents << 2;
6913 + desc->type |= info->seg_not_present ^ 1;
6914
6915 desc->s = 1;
6916 desc->dpl = 0x3;
6917 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
6918 }
6919
6920 extern struct desc_ptr idt_descr;
6921 -extern gate_desc idt_table[];
6922 -
6923 -struct gdt_page {
6924 - struct desc_struct gdt[GDT_ENTRIES];
6925 -} __attribute__((aligned(PAGE_SIZE)));
6926 -
6927 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6928 +extern gate_desc idt_table[256];
6929
6930 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6931 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6932 {
6933 - return per_cpu(gdt_page, cpu).gdt;
6934 + return cpu_gdt_table[cpu];
6935 }
6936
6937 #ifdef CONFIG_X86_64
6938 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
6939 unsigned long base, unsigned dpl, unsigned flags,
6940 unsigned short seg)
6941 {
6942 - gate->a = (seg << 16) | (base & 0xffff);
6943 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6944 + gate->gate.offset_low = base;
6945 + gate->gate.seg = seg;
6946 + gate->gate.reserved = 0;
6947 + gate->gate.type = type;
6948 + gate->gate.s = 0;
6949 + gate->gate.dpl = dpl;
6950 + gate->gate.p = 1;
6951 + gate->gate.offset_high = base >> 16;
6952 }
6953
6954 #endif
6955 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
6956
6957 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
6958 {
6959 + pax_open_kernel();
6960 memcpy(&idt[entry], gate, sizeof(*gate));
6961 + pax_close_kernel();
6962 }
6963
6964 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
6965 {
6966 + pax_open_kernel();
6967 memcpy(&ldt[entry], desc, 8);
6968 + pax_close_kernel();
6969 }
6970
6971 static inline void
6972 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
6973 default: size = sizeof(*gdt); break;
6974 }
6975
6976 + pax_open_kernel();
6977 memcpy(&gdt[entry], desc, size);
6978 + pax_close_kernel();
6979 }
6980
6981 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
6982 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const
6983
6984 static inline void native_load_tr_desc(void)
6985 {
6986 + pax_open_kernel();
6987 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
6988 + pax_close_kernel();
6989 }
6990
6991 static inline void native_load_gdt(const struct desc_ptr *dtr)
6992 @@ -244,8 +255,10 @@ static inline void native_load_tls(struc
6993 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
6994 unsigned int i;
6995
6996 + pax_open_kernel();
6997 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
6998 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
6999 + pax_close_kernel();
7000 }
7001
7002 #define _LDT_empty(info) \
7003 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7004 desc->limit = (limit >> 16) & 0xf;
7005 }
7006
7007 -static inline void _set_gate(int gate, unsigned type, void *addr,
7008 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7009 unsigned dpl, unsigned ist, unsigned seg)
7010 {
7011 gate_desc s;
7012 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7013 * Pentium F0 0F bugfix can have resulted in the mapped
7014 * IDT being write-protected.
7015 */
7016 -static inline void set_intr_gate(unsigned int n, void *addr)
7017 +static inline void set_intr_gate(unsigned int n, const void *addr)
7018 {
7019 BUG_ON((unsigned)n > 0xFF);
7020 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7021 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7022 /*
7023 * This routine sets up an interrupt gate at directory privilege level 3.
7024 */
7025 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7026 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7027 {
7028 BUG_ON((unsigned)n > 0xFF);
7029 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7030 }
7031
7032 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7033 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7034 {
7035 BUG_ON((unsigned)n > 0xFF);
7036 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7037 }
7038
7039 -static inline void set_trap_gate(unsigned int n, void *addr)
7040 +static inline void set_trap_gate(unsigned int n, const void *addr)
7041 {
7042 BUG_ON((unsigned)n > 0xFF);
7043 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7044 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7045 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7046 {
7047 BUG_ON((unsigned)n > 0xFF);
7048 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7049 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7050 }
7051
7052 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7053 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7054 {
7055 BUG_ON((unsigned)n > 0xFF);
7056 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7057 }
7058
7059 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7060 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7061 {
7062 BUG_ON((unsigned)n > 0xFF);
7063 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7064 }
7065
7066 +#ifdef CONFIG_X86_32
7067 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7068 +{
7069 + struct desc_struct d;
7070 +
7071 + if (likely(limit))
7072 + limit = (limit - 1UL) >> PAGE_SHIFT;
7073 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7074 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7075 +}
7076 +#endif
7077 +
7078 #endif /* _ASM_X86_DESC_H */
7079 diff -urNp linux-3.0.4/arch/x86/include/asm/e820.h linux-3.0.4/arch/x86/include/asm/e820.h
7080 --- linux-3.0.4/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7081 +++ linux-3.0.4/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7082 @@ -69,7 +69,7 @@ struct e820map {
7083 #define ISA_START_ADDRESS 0xa0000
7084 #define ISA_END_ADDRESS 0x100000
7085
7086 -#define BIOS_BEGIN 0x000a0000
7087 +#define BIOS_BEGIN 0x000c0000
7088 #define BIOS_END 0x00100000
7089
7090 #define BIOS_ROM_BASE 0xffe00000
7091 diff -urNp linux-3.0.4/arch/x86/include/asm/elf.h linux-3.0.4/arch/x86/include/asm/elf.h
7092 --- linux-3.0.4/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7093 +++ linux-3.0.4/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7094 @@ -237,7 +237,25 @@ extern int force_personality32;
7095 the loader. We need to make sure that it is out of the way of the program
7096 that it will "exec", and that there is sufficient room for the brk. */
7097
7098 +#ifdef CONFIG_PAX_SEGMEXEC
7099 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7100 +#else
7101 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7102 +#endif
7103 +
7104 +#ifdef CONFIG_PAX_ASLR
7105 +#ifdef CONFIG_X86_32
7106 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7107 +
7108 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7109 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7110 +#else
7111 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7112 +
7113 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7114 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7115 +#endif
7116 +#endif
7117
7118 /* This yields a mask that user programs can use to figure out what
7119 instruction set this CPU supports. This could be done in user space,
7120 @@ -290,9 +308,7 @@ do { \
7121
7122 #define ARCH_DLINFO \
7123 do { \
7124 - if (vdso_enabled) \
7125 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7126 - (unsigned long)current->mm->context.vdso); \
7127 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7128 } while (0)
7129
7130 #define AT_SYSINFO 32
7131 @@ -303,7 +319,7 @@ do { \
7132
7133 #endif /* !CONFIG_X86_32 */
7134
7135 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7136 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7137
7138 #define VDSO_ENTRY \
7139 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7140 @@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7141 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7142 #define compat_arch_setup_additional_pages syscall32_setup_pages
7143
7144 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7145 -#define arch_randomize_brk arch_randomize_brk
7146 -
7147 #endif /* _ASM_X86_ELF_H */
7148 diff -urNp linux-3.0.4/arch/x86/include/asm/emergency-restart.h linux-3.0.4/arch/x86/include/asm/emergency-restart.h
7149 --- linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7150 +++ linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7151 @@ -15,6 +15,6 @@ enum reboot_type {
7152
7153 extern enum reboot_type reboot_type;
7154
7155 -extern void machine_emergency_restart(void);
7156 +extern void machine_emergency_restart(void) __noreturn;
7157
7158 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7159 diff -urNp linux-3.0.4/arch/x86/include/asm/futex.h linux-3.0.4/arch/x86/include/asm/futex.h
7160 --- linux-3.0.4/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7161 +++ linux-3.0.4/arch/x86/include/asm/futex.h 2011-08-23 21:47:55.000000000 -0400
7162 @@ -12,16 +12,18 @@
7163 #include <asm/system.h>
7164
7165 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7166 + typecheck(u32 *, uaddr); \
7167 asm volatile("1:\t" insn "\n" \
7168 "2:\t.section .fixup,\"ax\"\n" \
7169 "3:\tmov\t%3, %1\n" \
7170 "\tjmp\t2b\n" \
7171 "\t.previous\n" \
7172 _ASM_EXTABLE(1b, 3b) \
7173 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7174 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7175 : "i" (-EFAULT), "0" (oparg), "1" (0))
7176
7177 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7178 + typecheck(u32 *, uaddr); \
7179 asm volatile("1:\tmovl %2, %0\n" \
7180 "\tmovl\t%0, %3\n" \
7181 "\t" insn "\n" \
7182 @@ -34,7 +36,7 @@
7183 _ASM_EXTABLE(1b, 4b) \
7184 _ASM_EXTABLE(2b, 4b) \
7185 : "=&a" (oldval), "=&r" (ret), \
7186 - "+m" (*uaddr), "=&r" (tem) \
7187 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7188 : "r" (oparg), "i" (-EFAULT), "1" (0))
7189
7190 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7191 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7192
7193 switch (op) {
7194 case FUTEX_OP_SET:
7195 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7196 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7197 break;
7198 case FUTEX_OP_ADD:
7199 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7200 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7201 uaddr, oparg);
7202 break;
7203 case FUTEX_OP_OR:
7204 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7205 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7206 return -EFAULT;
7207
7208 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7209 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7210 "2:\t.section .fixup, \"ax\"\n"
7211 "3:\tmov %3, %0\n"
7212 "\tjmp 2b\n"
7213 "\t.previous\n"
7214 _ASM_EXTABLE(1b, 3b)
7215 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7216 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7217 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7218 : "memory"
7219 );
7220 diff -urNp linux-3.0.4/arch/x86/include/asm/hw_irq.h linux-3.0.4/arch/x86/include/asm/hw_irq.h
7221 --- linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7222 +++ linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7223 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7224 extern void enable_IO_APIC(void);
7225
7226 /* Statistics */
7227 -extern atomic_t irq_err_count;
7228 -extern atomic_t irq_mis_count;
7229 +extern atomic_unchecked_t irq_err_count;
7230 +extern atomic_unchecked_t irq_mis_count;
7231
7232 /* EISA */
7233 extern void eisa_set_level_irq(unsigned int irq);
7234 diff -urNp linux-3.0.4/arch/x86/include/asm/i387.h linux-3.0.4/arch/x86/include/asm/i387.h
7235 --- linux-3.0.4/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7236 +++ linux-3.0.4/arch/x86/include/asm/i387.h 2011-08-23 21:47:55.000000000 -0400
7237 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7238 {
7239 int err;
7240
7241 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7242 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7243 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7244 +#endif
7245 +
7246 /* See comment in fxsave() below. */
7247 #ifdef CONFIG_AS_FXSAVEQ
7248 asm volatile("1: fxrstorq %[fx]\n\t"
7249 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7250 {
7251 int err;
7252
7253 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7254 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7255 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7256 +#endif
7257 +
7258 /*
7259 * Clear the bytes not touched by the fxsave and reserved
7260 * for the SW usage.
7261 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7262 #endif /* CONFIG_X86_64 */
7263
7264 /* We need a safe address that is cheap to find and that is already
7265 - in L1 during context switch. The best choices are unfortunately
7266 - different for UP and SMP */
7267 -#ifdef CONFIG_SMP
7268 -#define safe_address (__per_cpu_offset[0])
7269 -#else
7270 -#define safe_address (kstat_cpu(0).cpustat.user)
7271 -#endif
7272 + in L1 during context switch. */
7273 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7274
7275 /*
7276 * These must be called with preempt disabled
7277 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7278 struct thread_info *me = current_thread_info();
7279 preempt_disable();
7280 if (me->status & TS_USEDFPU)
7281 - __save_init_fpu(me->task);
7282 + __save_init_fpu(current);
7283 else
7284 clts();
7285 }
7286 diff -urNp linux-3.0.4/arch/x86/include/asm/io.h linux-3.0.4/arch/x86/include/asm/io.h
7287 --- linux-3.0.4/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7288 +++ linux-3.0.4/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7289 @@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7290
7291 #include <linux/vmalloc.h>
7292
7293 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7294 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7295 +{
7296 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7297 +}
7298 +
7299 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7300 +{
7301 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7302 +}
7303 +
7304 /*
7305 * Convert a virtual cached pointer to an uncached pointer
7306 */
7307 diff -urNp linux-3.0.4/arch/x86/include/asm/irqflags.h linux-3.0.4/arch/x86/include/asm/irqflags.h
7308 --- linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7309 +++ linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7310 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7311 sti; \
7312 sysexit
7313
7314 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7315 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7316 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7317 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7318 +
7319 #else
7320 #define INTERRUPT_RETURN iret
7321 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7322 diff -urNp linux-3.0.4/arch/x86/include/asm/kprobes.h linux-3.0.4/arch/x86/include/asm/kprobes.h
7323 --- linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7324 +++ linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7325 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7326 #define RELATIVEJUMP_SIZE 5
7327 #define RELATIVECALL_OPCODE 0xe8
7328 #define RELATIVE_ADDR_SIZE 4
7329 -#define MAX_STACK_SIZE 64
7330 -#define MIN_STACK_SIZE(ADDR) \
7331 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7332 - THREAD_SIZE - (unsigned long)(ADDR))) \
7333 - ? (MAX_STACK_SIZE) \
7334 - : (((unsigned long)current_thread_info()) + \
7335 - THREAD_SIZE - (unsigned long)(ADDR)))
7336 +#define MAX_STACK_SIZE 64UL
7337 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7338
7339 #define flush_insn_slot(p) do { } while (0)
7340
7341 diff -urNp linux-3.0.4/arch/x86/include/asm/kvm_host.h linux-3.0.4/arch/x86/include/asm/kvm_host.h
7342 --- linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7343 +++ linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7344 @@ -441,7 +441,7 @@ struct kvm_arch {
7345 unsigned int n_used_mmu_pages;
7346 unsigned int n_requested_mmu_pages;
7347 unsigned int n_max_mmu_pages;
7348 - atomic_t invlpg_counter;
7349 + atomic_unchecked_t invlpg_counter;
7350 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7351 /*
7352 * Hash table of struct kvm_mmu_page.
7353 @@ -619,7 +619,7 @@ struct kvm_x86_ops {
7354 enum x86_intercept_stage stage);
7355
7356 const struct trace_print_flags *exit_reasons_str;
7357 -};
7358 +} __do_const;
7359
7360 struct kvm_arch_async_pf {
7361 u32 token;
7362 diff -urNp linux-3.0.4/arch/x86/include/asm/local.h linux-3.0.4/arch/x86/include/asm/local.h
7363 --- linux-3.0.4/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7364 +++ linux-3.0.4/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7365 @@ -18,26 +18,58 @@ typedef struct {
7366
7367 static inline void local_inc(local_t *l)
7368 {
7369 - asm volatile(_ASM_INC "%0"
7370 + asm volatile(_ASM_INC "%0\n"
7371 +
7372 +#ifdef CONFIG_PAX_REFCOUNT
7373 + "jno 0f\n"
7374 + _ASM_DEC "%0\n"
7375 + "int $4\n0:\n"
7376 + _ASM_EXTABLE(0b, 0b)
7377 +#endif
7378 +
7379 : "+m" (l->a.counter));
7380 }
7381
7382 static inline void local_dec(local_t *l)
7383 {
7384 - asm volatile(_ASM_DEC "%0"
7385 + asm volatile(_ASM_DEC "%0\n"
7386 +
7387 +#ifdef CONFIG_PAX_REFCOUNT
7388 + "jno 0f\n"
7389 + _ASM_INC "%0\n"
7390 + "int $4\n0:\n"
7391 + _ASM_EXTABLE(0b, 0b)
7392 +#endif
7393 +
7394 : "+m" (l->a.counter));
7395 }
7396
7397 static inline void local_add(long i, local_t *l)
7398 {
7399 - asm volatile(_ASM_ADD "%1,%0"
7400 + asm volatile(_ASM_ADD "%1,%0\n"
7401 +
7402 +#ifdef CONFIG_PAX_REFCOUNT
7403 + "jno 0f\n"
7404 + _ASM_SUB "%1,%0\n"
7405 + "int $4\n0:\n"
7406 + _ASM_EXTABLE(0b, 0b)
7407 +#endif
7408 +
7409 : "+m" (l->a.counter)
7410 : "ir" (i));
7411 }
7412
7413 static inline void local_sub(long i, local_t *l)
7414 {
7415 - asm volatile(_ASM_SUB "%1,%0"
7416 + asm volatile(_ASM_SUB "%1,%0\n"
7417 +
7418 +#ifdef CONFIG_PAX_REFCOUNT
7419 + "jno 0f\n"
7420 + _ASM_ADD "%1,%0\n"
7421 + "int $4\n0:\n"
7422 + _ASM_EXTABLE(0b, 0b)
7423 +#endif
7424 +
7425 : "+m" (l->a.counter)
7426 : "ir" (i));
7427 }
7428 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7429 {
7430 unsigned char c;
7431
7432 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7433 + asm volatile(_ASM_SUB "%2,%0\n"
7434 +
7435 +#ifdef CONFIG_PAX_REFCOUNT
7436 + "jno 0f\n"
7437 + _ASM_ADD "%2,%0\n"
7438 + "int $4\n0:\n"
7439 + _ASM_EXTABLE(0b, 0b)
7440 +#endif
7441 +
7442 + "sete %1\n"
7443 : "+m" (l->a.counter), "=qm" (c)
7444 : "ir" (i) : "memory");
7445 return c;
7446 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7447 {
7448 unsigned char c;
7449
7450 - asm volatile(_ASM_DEC "%0; sete %1"
7451 + asm volatile(_ASM_DEC "%0\n"
7452 +
7453 +#ifdef CONFIG_PAX_REFCOUNT
7454 + "jno 0f\n"
7455 + _ASM_INC "%0\n"
7456 + "int $4\n0:\n"
7457 + _ASM_EXTABLE(0b, 0b)
7458 +#endif
7459 +
7460 + "sete %1\n"
7461 : "+m" (l->a.counter), "=qm" (c)
7462 : : "memory");
7463 return c != 0;
7464 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7465 {
7466 unsigned char c;
7467
7468 - asm volatile(_ASM_INC "%0; sete %1"
7469 + asm volatile(_ASM_INC "%0\n"
7470 +
7471 +#ifdef CONFIG_PAX_REFCOUNT
7472 + "jno 0f\n"
7473 + _ASM_DEC "%0\n"
7474 + "int $4\n0:\n"
7475 + _ASM_EXTABLE(0b, 0b)
7476 +#endif
7477 +
7478 + "sete %1\n"
7479 : "+m" (l->a.counter), "=qm" (c)
7480 : : "memory");
7481 return c != 0;
7482 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7483 {
7484 unsigned char c;
7485
7486 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7487 + asm volatile(_ASM_ADD "%2,%0\n"
7488 +
7489 +#ifdef CONFIG_PAX_REFCOUNT
7490 + "jno 0f\n"
7491 + _ASM_SUB "%2,%0\n"
7492 + "int $4\n0:\n"
7493 + _ASM_EXTABLE(0b, 0b)
7494 +#endif
7495 +
7496 + "sets %1\n"
7497 : "+m" (l->a.counter), "=qm" (c)
7498 : "ir" (i) : "memory");
7499 return c;
7500 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7501 #endif
7502 /* Modern 486+ processor */
7503 __i = i;
7504 - asm volatile(_ASM_XADD "%0, %1;"
7505 + asm volatile(_ASM_XADD "%0, %1\n"
7506 +
7507 +#ifdef CONFIG_PAX_REFCOUNT
7508 + "jno 0f\n"
7509 + _ASM_MOV "%0,%1\n"
7510 + "int $4\n0:\n"
7511 + _ASM_EXTABLE(0b, 0b)
7512 +#endif
7513 +
7514 : "+r" (i), "+m" (l->a.counter)
7515 : : "memory");
7516 return i + __i;
7517 diff -urNp linux-3.0.4/arch/x86/include/asm/mman.h linux-3.0.4/arch/x86/include/asm/mman.h
7518 --- linux-3.0.4/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7519 +++ linux-3.0.4/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7520 @@ -5,4 +5,14 @@
7521
7522 #include <asm-generic/mman.h>
7523
7524 +#ifdef __KERNEL__
7525 +#ifndef __ASSEMBLY__
7526 +#ifdef CONFIG_X86_32
7527 +#define arch_mmap_check i386_mmap_check
7528 +int i386_mmap_check(unsigned long addr, unsigned long len,
7529 + unsigned long flags);
7530 +#endif
7531 +#endif
7532 +#endif
7533 +
7534 #endif /* _ASM_X86_MMAN_H */
7535 diff -urNp linux-3.0.4/arch/x86/include/asm/mmu_context.h linux-3.0.4/arch/x86/include/asm/mmu_context.h
7536 --- linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7537 +++ linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7538 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7539
7540 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7541 {
7542 +
7543 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7544 + unsigned int i;
7545 + pgd_t *pgd;
7546 +
7547 + pax_open_kernel();
7548 + pgd = get_cpu_pgd(smp_processor_id());
7549 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7550 + set_pgd_batched(pgd+i, native_make_pgd(0));
7551 + pax_close_kernel();
7552 +#endif
7553 +
7554 #ifdef CONFIG_SMP
7555 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7556 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7557 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7558 struct task_struct *tsk)
7559 {
7560 unsigned cpu = smp_processor_id();
7561 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7562 + int tlbstate = TLBSTATE_OK;
7563 +#endif
7564
7565 if (likely(prev != next)) {
7566 #ifdef CONFIG_SMP
7567 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7568 + tlbstate = percpu_read(cpu_tlbstate.state);
7569 +#endif
7570 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7571 percpu_write(cpu_tlbstate.active_mm, next);
7572 #endif
7573 cpumask_set_cpu(cpu, mm_cpumask(next));
7574
7575 /* Re-load page tables */
7576 +#ifdef CONFIG_PAX_PER_CPU_PGD
7577 + pax_open_kernel();
7578 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7579 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7580 + pax_close_kernel();
7581 + load_cr3(get_cpu_pgd(cpu));
7582 +#else
7583 load_cr3(next->pgd);
7584 +#endif
7585
7586 /* stop flush ipis for the previous mm */
7587 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7588 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7589 */
7590 if (unlikely(prev->context.ldt != next->context.ldt))
7591 load_LDT_nolock(&next->context);
7592 - }
7593 +
7594 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7595 + if (!(__supported_pte_mask & _PAGE_NX)) {
7596 + smp_mb__before_clear_bit();
7597 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7598 + smp_mb__after_clear_bit();
7599 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7600 + }
7601 +#endif
7602 +
7603 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7604 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7605 + prev->context.user_cs_limit != next->context.user_cs_limit))
7606 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7607 #ifdef CONFIG_SMP
7608 + else if (unlikely(tlbstate != TLBSTATE_OK))
7609 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7610 +#endif
7611 +#endif
7612 +
7613 + }
7614 else {
7615 +
7616 +#ifdef CONFIG_PAX_PER_CPU_PGD
7617 + pax_open_kernel();
7618 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7619 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7620 + pax_close_kernel();
7621 + load_cr3(get_cpu_pgd(cpu));
7622 +#endif
7623 +
7624 +#ifdef CONFIG_SMP
7625 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7626 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7627
7628 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7629 * tlb flush IPI delivery. We must reload CR3
7630 * to make sure to use no freed page tables.
7631 */
7632 +
7633 +#ifndef CONFIG_PAX_PER_CPU_PGD
7634 load_cr3(next->pgd);
7635 +#endif
7636 +
7637 load_LDT_nolock(&next->context);
7638 +
7639 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7640 + if (!(__supported_pte_mask & _PAGE_NX))
7641 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7642 +#endif
7643 +
7644 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7645 +#ifdef CONFIG_PAX_PAGEEXEC
7646 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7647 +#endif
7648 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7649 +#endif
7650 +
7651 }
7652 - }
7653 #endif
7654 + }
7655 }
7656
7657 #define activate_mm(prev, next) \
7658 diff -urNp linux-3.0.4/arch/x86/include/asm/mmu.h linux-3.0.4/arch/x86/include/asm/mmu.h
7659 --- linux-3.0.4/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7660 +++ linux-3.0.4/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7661 @@ -9,7 +9,7 @@
7662 * we put the segment information here.
7663 */
7664 typedef struct {
7665 - void *ldt;
7666 + struct desc_struct *ldt;
7667 int size;
7668
7669 #ifdef CONFIG_X86_64
7670 @@ -18,7 +18,19 @@ typedef struct {
7671 #endif
7672
7673 struct mutex lock;
7674 - void *vdso;
7675 + unsigned long vdso;
7676 +
7677 +#ifdef CONFIG_X86_32
7678 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7679 + unsigned long user_cs_base;
7680 + unsigned long user_cs_limit;
7681 +
7682 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7683 + cpumask_t cpu_user_cs_mask;
7684 +#endif
7685 +
7686 +#endif
7687 +#endif
7688 } mm_context_t;
7689
7690 #ifdef CONFIG_SMP
7691 diff -urNp linux-3.0.4/arch/x86/include/asm/module.h linux-3.0.4/arch/x86/include/asm/module.h
7692 --- linux-3.0.4/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7693 +++ linux-3.0.4/arch/x86/include/asm/module.h 2011-08-23 21:48:14.000000000 -0400
7694 @@ -5,6 +5,7 @@
7695
7696 #ifdef CONFIG_X86_64
7697 /* X86_64 does not define MODULE_PROC_FAMILY */
7698 +#define MODULE_PROC_FAMILY ""
7699 #elif defined CONFIG_M386
7700 #define MODULE_PROC_FAMILY "386 "
7701 #elif defined CONFIG_M486
7702 @@ -59,8 +60,30 @@
7703 #error unknown processor family
7704 #endif
7705
7706 -#ifdef CONFIG_X86_32
7707 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7708 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7709 +#define MODULE_PAX_UDEREF "UDEREF "
7710 +#else
7711 +#define MODULE_PAX_UDEREF ""
7712 +#endif
7713 +
7714 +#ifdef CONFIG_PAX_KERNEXEC
7715 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7716 +#else
7717 +#define MODULE_PAX_KERNEXEC ""
7718 #endif
7719
7720 +#ifdef CONFIG_PAX_REFCOUNT
7721 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
7722 +#else
7723 +#define MODULE_PAX_REFCOUNT ""
7724 +#endif
7725 +
7726 +#ifdef CONFIG_GRKERNSEC
7727 +#define MODULE_GRSEC "GRSECURITY "
7728 +#else
7729 +#define MODULE_GRSEC ""
7730 +#endif
7731 +
7732 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7733 +
7734 #endif /* _ASM_X86_MODULE_H */
7735 diff -urNp linux-3.0.4/arch/x86/include/asm/page_64_types.h linux-3.0.4/arch/x86/include/asm/page_64_types.h
7736 --- linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7737 +++ linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7738 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7739
7740 /* duplicated to the one in bootmem.h */
7741 extern unsigned long max_pfn;
7742 -extern unsigned long phys_base;
7743 +extern const unsigned long phys_base;
7744
7745 extern unsigned long __phys_addr(unsigned long);
7746 #define __phys_reloc_hide(x) (x)
7747 diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt.h linux-3.0.4/arch/x86/include/asm/paravirt.h
7748 --- linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7749 +++ linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7750 @@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7751 val);
7752 }
7753
7754 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7755 +{
7756 + pgdval_t val = native_pgd_val(pgd);
7757 +
7758 + if (sizeof(pgdval_t) > sizeof(long))
7759 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7760 + val, (u64)val >> 32);
7761 + else
7762 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7763 + val);
7764 +}
7765 +
7766 static inline void pgd_clear(pgd_t *pgdp)
7767 {
7768 set_pgd(pgdp, __pgd(0));
7769 @@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7770 pv_mmu_ops.set_fixmap(idx, phys, flags);
7771 }
7772
7773 +#ifdef CONFIG_PAX_KERNEXEC
7774 +static inline unsigned long pax_open_kernel(void)
7775 +{
7776 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7777 +}
7778 +
7779 +static inline unsigned long pax_close_kernel(void)
7780 +{
7781 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7782 +}
7783 +#else
7784 +static inline unsigned long pax_open_kernel(void) { return 0; }
7785 +static inline unsigned long pax_close_kernel(void) { return 0; }
7786 +#endif
7787 +
7788 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7789
7790 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7791 @@ -955,7 +982,7 @@ extern void default_banner(void);
7792
7793 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7794 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7795 -#define PARA_INDIRECT(addr) *%cs:addr
7796 +#define PARA_INDIRECT(addr) *%ss:addr
7797 #endif
7798
7799 #define INTERRUPT_RETURN \
7800 @@ -1032,6 +1059,21 @@ extern void default_banner(void);
7801 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7802 CLBR_NONE, \
7803 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7804 +
7805 +#define GET_CR0_INTO_RDI \
7806 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7807 + mov %rax,%rdi
7808 +
7809 +#define SET_RDI_INTO_CR0 \
7810 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7811 +
7812 +#define GET_CR3_INTO_RDI \
7813 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7814 + mov %rax,%rdi
7815 +
7816 +#define SET_RDI_INTO_CR3 \
7817 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7818 +
7819 #endif /* CONFIG_X86_32 */
7820
7821 #endif /* __ASSEMBLY__ */
7822 diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt_types.h linux-3.0.4/arch/x86/include/asm/paravirt_types.h
7823 --- linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
7824 +++ linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
7825 @@ -78,19 +78,19 @@ struct pv_init_ops {
7826 */
7827 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7828 unsigned long addr, unsigned len);
7829 -};
7830 +} __no_const;
7831
7832
7833 struct pv_lazy_ops {
7834 /* Set deferred update mode, used for batching operations. */
7835 void (*enter)(void);
7836 void (*leave)(void);
7837 -};
7838 +} __no_const;
7839
7840 struct pv_time_ops {
7841 unsigned long long (*sched_clock)(void);
7842 unsigned long (*get_tsc_khz)(void);
7843 -};
7844 +} __no_const;
7845
7846 struct pv_cpu_ops {
7847 /* hooks for various privileged instructions */
7848 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
7849
7850 void (*start_context_switch)(struct task_struct *prev);
7851 void (*end_context_switch)(struct task_struct *next);
7852 -};
7853 +} __no_const;
7854
7855 struct pv_irq_ops {
7856 /*
7857 @@ -217,7 +217,7 @@ struct pv_apic_ops {
7858 unsigned long start_eip,
7859 unsigned long start_esp);
7860 #endif
7861 -};
7862 +} __no_const;
7863
7864 struct pv_mmu_ops {
7865 unsigned long (*read_cr2)(void);
7866 @@ -306,6 +306,7 @@ struct pv_mmu_ops {
7867 struct paravirt_callee_save make_pud;
7868
7869 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
7870 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
7871 #endif /* PAGETABLE_LEVELS == 4 */
7872 #endif /* PAGETABLE_LEVELS >= 3 */
7873
7874 @@ -317,6 +318,12 @@ struct pv_mmu_ops {
7875 an mfn. We can tell which is which from the index. */
7876 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7877 phys_addr_t phys, pgprot_t flags);
7878 +
7879 +#ifdef CONFIG_PAX_KERNEXEC
7880 + unsigned long (*pax_open_kernel)(void);
7881 + unsigned long (*pax_close_kernel)(void);
7882 +#endif
7883 +
7884 };
7885
7886 struct arch_spinlock;
7887 @@ -327,7 +334,7 @@ struct pv_lock_ops {
7888 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7889 int (*spin_trylock)(struct arch_spinlock *lock);
7890 void (*spin_unlock)(struct arch_spinlock *lock);
7891 -};
7892 +} __no_const;
7893
7894 /* This contains all the paravirt structures: we get a convenient
7895 * number for each function using the offset which we use to indicate
7896 diff -urNp linux-3.0.4/arch/x86/include/asm/pgalloc.h linux-3.0.4/arch/x86/include/asm/pgalloc.h
7897 --- linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
7898 +++ linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
7899 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7900 pmd_t *pmd, pte_t *pte)
7901 {
7902 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7903 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7904 +}
7905 +
7906 +static inline void pmd_populate_user(struct mm_struct *mm,
7907 + pmd_t *pmd, pte_t *pte)
7908 +{
7909 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7910 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7911 }
7912
7913 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-2level.h linux-3.0.4/arch/x86/include/asm/pgtable-2level.h
7914 --- linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
7915 +++ linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
7916 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7917
7918 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7919 {
7920 + pax_open_kernel();
7921 *pmdp = pmd;
7922 + pax_close_kernel();
7923 }
7924
7925 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7926 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32.h linux-3.0.4/arch/x86/include/asm/pgtable_32.h
7927 --- linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
7928 +++ linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
7929 @@ -25,9 +25,6 @@
7930 struct mm_struct;
7931 struct vm_area_struct;
7932
7933 -extern pgd_t swapper_pg_dir[1024];
7934 -extern pgd_t initial_page_table[1024];
7935 -
7936 static inline void pgtable_cache_init(void) { }
7937 static inline void check_pgt_cache(void) { }
7938 void paging_init(void);
7939 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7940 # include <asm/pgtable-2level.h>
7941 #endif
7942
7943 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7944 +extern pgd_t initial_page_table[PTRS_PER_PGD];
7945 +#ifdef CONFIG_X86_PAE
7946 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7947 +#endif
7948 +
7949 #if defined(CONFIG_HIGHPTE)
7950 #define pte_offset_map(dir, address) \
7951 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7952 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7953 /* Clear a kernel PTE and flush it from the TLB */
7954 #define kpte_clear_flush(ptep, vaddr) \
7955 do { \
7956 + pax_open_kernel(); \
7957 pte_clear(&init_mm, (vaddr), (ptep)); \
7958 + pax_close_kernel(); \
7959 __flush_tlb_one((vaddr)); \
7960 } while (0)
7961
7962 @@ -74,6 +79,9 @@ do { \
7963
7964 #endif /* !__ASSEMBLY__ */
7965
7966 +#define HAVE_ARCH_UNMAPPED_AREA
7967 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7968 +
7969 /*
7970 * kern_addr_valid() is (1) for FLATMEM and (0) for
7971 * SPARSEMEM and DISCONTIGMEM
7972 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h
7973 --- linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
7974 +++ linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
7975 @@ -8,7 +8,7 @@
7976 */
7977 #ifdef CONFIG_X86_PAE
7978 # include <asm/pgtable-3level_types.h>
7979 -# define PMD_SIZE (1UL << PMD_SHIFT)
7980 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7981 # define PMD_MASK (~(PMD_SIZE - 1))
7982 #else
7983 # include <asm/pgtable-2level_types.h>
7984 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7985 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7986 #endif
7987
7988 +#ifdef CONFIG_PAX_KERNEXEC
7989 +#ifndef __ASSEMBLY__
7990 +extern unsigned char MODULES_EXEC_VADDR[];
7991 +extern unsigned char MODULES_EXEC_END[];
7992 +#endif
7993 +#include <asm/boot.h>
7994 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7995 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7996 +#else
7997 +#define ktla_ktva(addr) (addr)
7998 +#define ktva_ktla(addr) (addr)
7999 +#endif
8000 +
8001 #define MODULES_VADDR VMALLOC_START
8002 #define MODULES_END VMALLOC_END
8003 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8004 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-3level.h linux-3.0.4/arch/x86/include/asm/pgtable-3level.h
8005 --- linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8006 +++ linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8007 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8008
8009 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8010 {
8011 + pax_open_kernel();
8012 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8013 + pax_close_kernel();
8014 }
8015
8016 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8017 {
8018 + pax_open_kernel();
8019 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8020 + pax_close_kernel();
8021 }
8022
8023 /*
8024 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64.h linux-3.0.4/arch/x86/include/asm/pgtable_64.h
8025 --- linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8026 +++ linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8027 @@ -16,10 +16,13 @@
8028
8029 extern pud_t level3_kernel_pgt[512];
8030 extern pud_t level3_ident_pgt[512];
8031 +extern pud_t level3_vmalloc_pgt[512];
8032 +extern pud_t level3_vmemmap_pgt[512];
8033 +extern pud_t level2_vmemmap_pgt[512];
8034 extern pmd_t level2_kernel_pgt[512];
8035 extern pmd_t level2_fixmap_pgt[512];
8036 -extern pmd_t level2_ident_pgt[512];
8037 -extern pgd_t init_level4_pgt[];
8038 +extern pmd_t level2_ident_pgt[512*2];
8039 +extern pgd_t init_level4_pgt[512];
8040
8041 #define swapper_pg_dir init_level4_pgt
8042
8043 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8044
8045 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8046 {
8047 + pax_open_kernel();
8048 *pmdp = pmd;
8049 + pax_close_kernel();
8050 }
8051
8052 static inline void native_pmd_clear(pmd_t *pmd)
8053 @@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8054
8055 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8056 {
8057 + pax_open_kernel();
8058 + *pgdp = pgd;
8059 + pax_close_kernel();
8060 +}
8061 +
8062 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8063 +{
8064 *pgdp = pgd;
8065 }
8066
8067 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h
8068 --- linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8069 +++ linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8070 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8071 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8072 #define MODULES_END _AC(0xffffffffff000000, UL)
8073 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8074 +#define MODULES_EXEC_VADDR MODULES_VADDR
8075 +#define MODULES_EXEC_END MODULES_END
8076 +
8077 +#define ktla_ktva(addr) (addr)
8078 +#define ktva_ktla(addr) (addr)
8079
8080 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8081 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable.h linux-3.0.4/arch/x86/include/asm/pgtable.h
8082 --- linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8083 +++ linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8084 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8085
8086 #ifndef __PAGETABLE_PUD_FOLDED
8087 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8088 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8089 #define pgd_clear(pgd) native_pgd_clear(pgd)
8090 #endif
8091
8092 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8093
8094 #define arch_end_context_switch(prev) do {} while(0)
8095
8096 +#define pax_open_kernel() native_pax_open_kernel()
8097 +#define pax_close_kernel() native_pax_close_kernel()
8098 #endif /* CONFIG_PARAVIRT */
8099
8100 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8101 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8102 +
8103 +#ifdef CONFIG_PAX_KERNEXEC
8104 +static inline unsigned long native_pax_open_kernel(void)
8105 +{
8106 + unsigned long cr0;
8107 +
8108 + preempt_disable();
8109 + barrier();
8110 + cr0 = read_cr0() ^ X86_CR0_WP;
8111 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8112 + write_cr0(cr0);
8113 + return cr0 ^ X86_CR0_WP;
8114 +}
8115 +
8116 +static inline unsigned long native_pax_close_kernel(void)
8117 +{
8118 + unsigned long cr0;
8119 +
8120 + cr0 = read_cr0() ^ X86_CR0_WP;
8121 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8122 + write_cr0(cr0);
8123 + barrier();
8124 + preempt_enable_no_resched();
8125 + return cr0 ^ X86_CR0_WP;
8126 +}
8127 +#else
8128 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8129 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8130 +#endif
8131 +
8132 /*
8133 * The following only work if pte_present() is true.
8134 * Undefined behaviour if not..
8135 */
8136 +static inline int pte_user(pte_t pte)
8137 +{
8138 + return pte_val(pte) & _PAGE_USER;
8139 +}
8140 +
8141 static inline int pte_dirty(pte_t pte)
8142 {
8143 return pte_flags(pte) & _PAGE_DIRTY;
8144 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8145 return pte_clear_flags(pte, _PAGE_RW);
8146 }
8147
8148 +static inline pte_t pte_mkread(pte_t pte)
8149 +{
8150 + return __pte(pte_val(pte) | _PAGE_USER);
8151 +}
8152 +
8153 static inline pte_t pte_mkexec(pte_t pte)
8154 {
8155 - return pte_clear_flags(pte, _PAGE_NX);
8156 +#ifdef CONFIG_X86_PAE
8157 + if (__supported_pte_mask & _PAGE_NX)
8158 + return pte_clear_flags(pte, _PAGE_NX);
8159 + else
8160 +#endif
8161 + return pte_set_flags(pte, _PAGE_USER);
8162 +}
8163 +
8164 +static inline pte_t pte_exprotect(pte_t pte)
8165 +{
8166 +#ifdef CONFIG_X86_PAE
8167 + if (__supported_pte_mask & _PAGE_NX)
8168 + return pte_set_flags(pte, _PAGE_NX);
8169 + else
8170 +#endif
8171 + return pte_clear_flags(pte, _PAGE_USER);
8172 }
8173
8174 static inline pte_t pte_mkdirty(pte_t pte)
8175 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8176 #endif
8177
8178 #ifndef __ASSEMBLY__
8179 +
8180 +#ifdef CONFIG_PAX_PER_CPU_PGD
8181 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8182 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8183 +{
8184 + return cpu_pgd[cpu];
8185 +}
8186 +#endif
8187 +
8188 #include <linux/mm_types.h>
8189
8190 static inline int pte_none(pte_t pte)
8191 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8192
8193 static inline int pgd_bad(pgd_t pgd)
8194 {
8195 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8196 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8197 }
8198
8199 static inline int pgd_none(pgd_t pgd)
8200 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8201 * pgd_offset() returns a (pgd_t *)
8202 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8203 */
8204 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8205 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8206 +
8207 +#ifdef CONFIG_PAX_PER_CPU_PGD
8208 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8209 +#endif
8210 +
8211 /*
8212 * a shortcut which implies the use of the kernel's pgd, instead
8213 * of a process's
8214 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8215 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8216 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8217
8218 +#ifdef CONFIG_X86_32
8219 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8220 +#else
8221 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8222 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8223 +
8224 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8225 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8226 +#else
8227 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8228 +#endif
8229 +
8230 +#endif
8231 +
8232 #ifndef __ASSEMBLY__
8233
8234 extern int direct_gbpages;
8235 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8236 * dst and src can be on the same page, but the range must not overlap,
8237 * and must not cross a page boundary.
8238 */
8239 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8240 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8241 {
8242 - memcpy(dst, src, count * sizeof(pgd_t));
8243 + pax_open_kernel();
8244 + while (count--)
8245 + *dst++ = *src++;
8246 + pax_close_kernel();
8247 }
8248
8249 +#ifdef CONFIG_PAX_PER_CPU_PGD
8250 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8251 +#endif
8252 +
8253 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8254 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8255 +#else
8256 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8257 +#endif
8258
8259 #include <asm-generic/pgtable.h>
8260 #endif /* __ASSEMBLY__ */
8261 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_types.h linux-3.0.4/arch/x86/include/asm/pgtable_types.h
8262 --- linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8263 +++ linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8264 @@ -16,13 +16,12 @@
8265 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8266 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8267 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8268 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8269 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8270 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8271 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8272 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8273 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8274 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8275 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8276 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8277 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8278 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8279
8280 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8281 @@ -40,7 +39,6 @@
8282 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8283 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8284 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8285 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8286 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8287 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8288 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8289 @@ -57,8 +55,10 @@
8290
8291 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8292 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8293 -#else
8294 +#elif defined(CONFIG_KMEMCHECK)
8295 #define _PAGE_NX (_AT(pteval_t, 0))
8296 +#else
8297 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8298 #endif
8299
8300 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8301 @@ -96,6 +96,9 @@
8302 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8303 _PAGE_ACCESSED)
8304
8305 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8306 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8307 +
8308 #define __PAGE_KERNEL_EXEC \
8309 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8310 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8311 @@ -106,8 +109,8 @@
8312 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8313 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8314 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8315 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8316 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8317 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8318 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8319 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8320 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8321 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8322 @@ -166,8 +169,8 @@
8323 * bits are combined, this will alow user to access the high address mapped
8324 * VDSO in the presence of CONFIG_COMPAT_VDSO
8325 */
8326 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8327 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8328 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8329 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8330 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8331 #endif
8332
8333 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8334 {
8335 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8336 }
8337 +#endif
8338
8339 +#if PAGETABLE_LEVELS == 3
8340 +#include <asm-generic/pgtable-nopud.h>
8341 +#endif
8342 +
8343 +#if PAGETABLE_LEVELS == 2
8344 +#include <asm-generic/pgtable-nopmd.h>
8345 +#endif
8346 +
8347 +#ifndef __ASSEMBLY__
8348 #if PAGETABLE_LEVELS > 3
8349 typedef struct { pudval_t pud; } pud_t;
8350
8351 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8352 return pud.pud;
8353 }
8354 #else
8355 -#include <asm-generic/pgtable-nopud.h>
8356 -
8357 static inline pudval_t native_pud_val(pud_t pud)
8358 {
8359 return native_pgd_val(pud.pgd);
8360 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8361 return pmd.pmd;
8362 }
8363 #else
8364 -#include <asm-generic/pgtable-nopmd.h>
8365 -
8366 static inline pmdval_t native_pmd_val(pmd_t pmd)
8367 {
8368 return native_pgd_val(pmd.pud.pgd);
8369 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8370
8371 extern pteval_t __supported_pte_mask;
8372 extern void set_nx(void);
8373 -extern int nx_enabled;
8374
8375 #define pgprot_writecombine pgprot_writecombine
8376 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8377 diff -urNp linux-3.0.4/arch/x86/include/asm/processor.h linux-3.0.4/arch/x86/include/asm/processor.h
8378 --- linux-3.0.4/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8379 +++ linux-3.0.4/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8380 @@ -266,7 +266,7 @@ struct tss_struct {
8381
8382 } ____cacheline_aligned;
8383
8384 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8385 +extern struct tss_struct init_tss[NR_CPUS];
8386
8387 /*
8388 * Save the original ist values for checking stack pointers during debugging
8389 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8390 */
8391 #define TASK_SIZE PAGE_OFFSET
8392 #define TASK_SIZE_MAX TASK_SIZE
8393 +
8394 +#ifdef CONFIG_PAX_SEGMEXEC
8395 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8396 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8397 +#else
8398 #define STACK_TOP TASK_SIZE
8399 -#define STACK_TOP_MAX STACK_TOP
8400 +#endif
8401 +
8402 +#define STACK_TOP_MAX TASK_SIZE
8403
8404 #define INIT_THREAD { \
8405 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8406 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8407 .vm86_info = NULL, \
8408 .sysenter_cs = __KERNEL_CS, \
8409 .io_bitmap_ptr = NULL, \
8410 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8411 */
8412 #define INIT_TSS { \
8413 .x86_tss = { \
8414 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8415 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8416 .ss0 = __KERNEL_DS, \
8417 .ss1 = __KERNEL_CS, \
8418 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8419 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8420 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8421
8422 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8423 -#define KSTK_TOP(info) \
8424 -({ \
8425 - unsigned long *__ptr = (unsigned long *)(info); \
8426 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8427 -})
8428 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8429
8430 /*
8431 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8432 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8433 #define task_pt_regs(task) \
8434 ({ \
8435 struct pt_regs *__regs__; \
8436 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8437 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8438 __regs__ - 1; \
8439 })
8440
8441 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8442 /*
8443 * User space process size. 47bits minus one guard page.
8444 */
8445 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8446 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8447
8448 /* This decides where the kernel will search for a free chunk of vm
8449 * space during mmap's.
8450 */
8451 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8452 - 0xc0000000 : 0xFFFFe000)
8453 + 0xc0000000 : 0xFFFFf000)
8454
8455 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8456 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8457 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8458 #define STACK_TOP_MAX TASK_SIZE_MAX
8459
8460 #define INIT_THREAD { \
8461 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8462 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8463 }
8464
8465 #define INIT_TSS { \
8466 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8467 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8468 }
8469
8470 /*
8471 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8472 */
8473 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8474
8475 +#ifdef CONFIG_PAX_SEGMEXEC
8476 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8477 +#endif
8478 +
8479 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8480
8481 /* Get/set a process' ability to use the timestamp counter instruction */
8482 diff -urNp linux-3.0.4/arch/x86/include/asm/ptrace.h linux-3.0.4/arch/x86/include/asm/ptrace.h
8483 --- linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8484 +++ linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8485 @@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8486 }
8487
8488 /*
8489 - * user_mode_vm(regs) determines whether a register set came from user mode.
8490 + * user_mode(regs) determines whether a register set came from user mode.
8491 * This is true if V8086 mode was enabled OR if the register set was from
8492 * protected mode with RPL-3 CS value. This tricky test checks that with
8493 * one comparison. Many places in the kernel can bypass this full check
8494 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8495 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8496 + * be used.
8497 */
8498 -static inline int user_mode(struct pt_regs *regs)
8499 +static inline int user_mode_novm(struct pt_regs *regs)
8500 {
8501 #ifdef CONFIG_X86_32
8502 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8503 #else
8504 - return !!(regs->cs & 3);
8505 + return !!(regs->cs & SEGMENT_RPL_MASK);
8506 #endif
8507 }
8508
8509 -static inline int user_mode_vm(struct pt_regs *regs)
8510 +static inline int user_mode(struct pt_regs *regs)
8511 {
8512 #ifdef CONFIG_X86_32
8513 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8514 USER_RPL;
8515 #else
8516 - return user_mode(regs);
8517 + return user_mode_novm(regs);
8518 #endif
8519 }
8520
8521 diff -urNp linux-3.0.4/arch/x86/include/asm/reboot.h linux-3.0.4/arch/x86/include/asm/reboot.h
8522 --- linux-3.0.4/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8523 +++ linux-3.0.4/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8524 @@ -6,19 +6,19 @@
8525 struct pt_regs;
8526
8527 struct machine_ops {
8528 - void (*restart)(char *cmd);
8529 - void (*halt)(void);
8530 - void (*power_off)(void);
8531 + void (* __noreturn restart)(char *cmd);
8532 + void (* __noreturn halt)(void);
8533 + void (* __noreturn power_off)(void);
8534 void (*shutdown)(void);
8535 void (*crash_shutdown)(struct pt_regs *);
8536 - void (*emergency_restart)(void);
8537 -};
8538 + void (* __noreturn emergency_restart)(void);
8539 +} __no_const;
8540
8541 extern struct machine_ops machine_ops;
8542
8543 void native_machine_crash_shutdown(struct pt_regs *regs);
8544 void native_machine_shutdown(void);
8545 -void machine_real_restart(unsigned int type);
8546 +void machine_real_restart(unsigned int type) __noreturn;
8547 /* These must match dispatch_table in reboot_32.S */
8548 #define MRR_BIOS 0
8549 #define MRR_APM 1
8550 diff -urNp linux-3.0.4/arch/x86/include/asm/rwsem.h linux-3.0.4/arch/x86/include/asm/rwsem.h
8551 --- linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8552 +++ linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8553 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8554 {
8555 asm volatile("# beginning down_read\n\t"
8556 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8557 +
8558 +#ifdef CONFIG_PAX_REFCOUNT
8559 + "jno 0f\n"
8560 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8561 + "int $4\n0:\n"
8562 + _ASM_EXTABLE(0b, 0b)
8563 +#endif
8564 +
8565 /* adds 0x00000001 */
8566 " jns 1f\n"
8567 " call call_rwsem_down_read_failed\n"
8568 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8569 "1:\n\t"
8570 " mov %1,%2\n\t"
8571 " add %3,%2\n\t"
8572 +
8573 +#ifdef CONFIG_PAX_REFCOUNT
8574 + "jno 0f\n"
8575 + "sub %3,%2\n"
8576 + "int $4\n0:\n"
8577 + _ASM_EXTABLE(0b, 0b)
8578 +#endif
8579 +
8580 " jle 2f\n\t"
8581 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8582 " jnz 1b\n\t"
8583 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8584 long tmp;
8585 asm volatile("# beginning down_write\n\t"
8586 LOCK_PREFIX " xadd %1,(%2)\n\t"
8587 +
8588 +#ifdef CONFIG_PAX_REFCOUNT
8589 + "jno 0f\n"
8590 + "mov %1,(%2)\n"
8591 + "int $4\n0:\n"
8592 + _ASM_EXTABLE(0b, 0b)
8593 +#endif
8594 +
8595 /* adds 0xffff0001, returns the old value */
8596 " test %1,%1\n\t"
8597 /* was the count 0 before? */
8598 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8599 long tmp;
8600 asm volatile("# beginning __up_read\n\t"
8601 LOCK_PREFIX " xadd %1,(%2)\n\t"
8602 +
8603 +#ifdef CONFIG_PAX_REFCOUNT
8604 + "jno 0f\n"
8605 + "mov %1,(%2)\n"
8606 + "int $4\n0:\n"
8607 + _ASM_EXTABLE(0b, 0b)
8608 +#endif
8609 +
8610 /* subtracts 1, returns the old value */
8611 " jns 1f\n\t"
8612 " call call_rwsem_wake\n" /* expects old value in %edx */
8613 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8614 long tmp;
8615 asm volatile("# beginning __up_write\n\t"
8616 LOCK_PREFIX " xadd %1,(%2)\n\t"
8617 +
8618 +#ifdef CONFIG_PAX_REFCOUNT
8619 + "jno 0f\n"
8620 + "mov %1,(%2)\n"
8621 + "int $4\n0:\n"
8622 + _ASM_EXTABLE(0b, 0b)
8623 +#endif
8624 +
8625 /* subtracts 0xffff0001, returns the old value */
8626 " jns 1f\n\t"
8627 " call call_rwsem_wake\n" /* expects old value in %edx */
8628 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8629 {
8630 asm volatile("# beginning __downgrade_write\n\t"
8631 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8632 +
8633 +#ifdef CONFIG_PAX_REFCOUNT
8634 + "jno 0f\n"
8635 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8636 + "int $4\n0:\n"
8637 + _ASM_EXTABLE(0b, 0b)
8638 +#endif
8639 +
8640 /*
8641 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8642 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8643 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8644 */
8645 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8646 {
8647 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8648 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8649 +
8650 +#ifdef CONFIG_PAX_REFCOUNT
8651 + "jno 0f\n"
8652 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8653 + "int $4\n0:\n"
8654 + _ASM_EXTABLE(0b, 0b)
8655 +#endif
8656 +
8657 : "+m" (sem->count)
8658 : "er" (delta));
8659 }
8660 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8661 {
8662 long tmp = delta;
8663
8664 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8665 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8666 +
8667 +#ifdef CONFIG_PAX_REFCOUNT
8668 + "jno 0f\n"
8669 + "mov %0,%1\n"
8670 + "int $4\n0:\n"
8671 + _ASM_EXTABLE(0b, 0b)
8672 +#endif
8673 +
8674 : "+r" (tmp), "+m" (sem->count)
8675 : : "memory");
8676
8677 diff -urNp linux-3.0.4/arch/x86/include/asm/segment.h linux-3.0.4/arch/x86/include/asm/segment.h
8678 --- linux-3.0.4/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8679 +++ linux-3.0.4/arch/x86/include/asm/segment.h 2011-08-23 21:47:55.000000000 -0400
8680 @@ -64,8 +64,8 @@
8681 * 26 - ESPFIX small SS
8682 * 27 - per-cpu [ offset to per-cpu data area ]
8683 * 28 - stack_canary-20 [ for stack protector ]
8684 - * 29 - unused
8685 - * 30 - unused
8686 + * 29 - PCI BIOS CS
8687 + * 30 - PCI BIOS DS
8688 * 31 - TSS for double fault handler
8689 */
8690 #define GDT_ENTRY_TLS_MIN 6
8691 @@ -79,6 +79,8 @@
8692
8693 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8694
8695 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8696 +
8697 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8698
8699 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8700 @@ -104,6 +106,12 @@
8701 #define __KERNEL_STACK_CANARY 0
8702 #endif
8703
8704 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8705 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8706 +
8707 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8708 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8709 +
8710 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8711
8712 /*
8713 @@ -141,7 +149,7 @@
8714 */
8715
8716 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8717 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8718 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8719
8720
8721 #else
8722 @@ -165,6 +173,8 @@
8723 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8724 #define __USER32_DS __USER_DS
8725
8726 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8727 +
8728 #define GDT_ENTRY_TSS 8 /* needs two entries */
8729 #define GDT_ENTRY_LDT 10 /* needs two entries */
8730 #define GDT_ENTRY_TLS_MIN 12
8731 @@ -185,6 +195,7 @@
8732 #endif
8733
8734 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8735 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8736 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8737 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8738 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8739 diff -urNp linux-3.0.4/arch/x86/include/asm/smp.h linux-3.0.4/arch/x86/include/asm/smp.h
8740 --- linux-3.0.4/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8741 +++ linux-3.0.4/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8742 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8743 /* cpus sharing the last level cache: */
8744 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8745 DECLARE_PER_CPU(u16, cpu_llc_id);
8746 -DECLARE_PER_CPU(int, cpu_number);
8747 +DECLARE_PER_CPU(unsigned int, cpu_number);
8748
8749 static inline struct cpumask *cpu_sibling_mask(int cpu)
8750 {
8751 @@ -77,7 +77,7 @@ struct smp_ops {
8752
8753 void (*send_call_func_ipi)(const struct cpumask *mask);
8754 void (*send_call_func_single_ipi)(int cpu);
8755 -};
8756 +} __no_const;
8757
8758 /* Globals due to paravirt */
8759 extern void set_cpu_sibling_map(int cpu);
8760 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8761 extern int safe_smp_processor_id(void);
8762
8763 #elif defined(CONFIG_X86_64_SMP)
8764 -#define raw_smp_processor_id() (percpu_read(cpu_number))
8765 -
8766 -#define stack_smp_processor_id() \
8767 -({ \
8768 - struct thread_info *ti; \
8769 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8770 - ti->cpu; \
8771 -})
8772 +#define raw_smp_processor_id() (percpu_read(cpu_number))
8773 +#define stack_smp_processor_id() raw_smp_processor_id()
8774 #define safe_smp_processor_id() smp_processor_id()
8775
8776 #endif
8777 diff -urNp linux-3.0.4/arch/x86/include/asm/spinlock.h linux-3.0.4/arch/x86/include/asm/spinlock.h
8778 --- linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8779 +++ linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8780 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8781 static inline void arch_read_lock(arch_rwlock_t *rw)
8782 {
8783 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8784 +
8785 +#ifdef CONFIG_PAX_REFCOUNT
8786 + "jno 0f\n"
8787 + LOCK_PREFIX " addl $1,(%0)\n"
8788 + "int $4\n0:\n"
8789 + _ASM_EXTABLE(0b, 0b)
8790 +#endif
8791 +
8792 "jns 1f\n"
8793 "call __read_lock_failed\n\t"
8794 "1:\n"
8795 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8796 static inline void arch_write_lock(arch_rwlock_t *rw)
8797 {
8798 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8799 +
8800 +#ifdef CONFIG_PAX_REFCOUNT
8801 + "jno 0f\n"
8802 + LOCK_PREFIX " addl %1,(%0)\n"
8803 + "int $4\n0:\n"
8804 + _ASM_EXTABLE(0b, 0b)
8805 +#endif
8806 +
8807 "jz 1f\n"
8808 "call __write_lock_failed\n\t"
8809 "1:\n"
8810 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8811
8812 static inline void arch_read_unlock(arch_rwlock_t *rw)
8813 {
8814 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8815 + asm volatile(LOCK_PREFIX "incl %0\n"
8816 +
8817 +#ifdef CONFIG_PAX_REFCOUNT
8818 + "jno 0f\n"
8819 + LOCK_PREFIX "decl %0\n"
8820 + "int $4\n0:\n"
8821 + _ASM_EXTABLE(0b, 0b)
8822 +#endif
8823 +
8824 + :"+m" (rw->lock) : : "memory");
8825 }
8826
8827 static inline void arch_write_unlock(arch_rwlock_t *rw)
8828 {
8829 - asm volatile(LOCK_PREFIX "addl %1, %0"
8830 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
8831 +
8832 +#ifdef CONFIG_PAX_REFCOUNT
8833 + "jno 0f\n"
8834 + LOCK_PREFIX "subl %1, %0\n"
8835 + "int $4\n0:\n"
8836 + _ASM_EXTABLE(0b, 0b)
8837 +#endif
8838 +
8839 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8840 }
8841
8842 diff -urNp linux-3.0.4/arch/x86/include/asm/stackprotector.h linux-3.0.4/arch/x86/include/asm/stackprotector.h
8843 --- linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
8844 +++ linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
8845 @@ -48,7 +48,7 @@
8846 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8847 */
8848 #define GDT_STACK_CANARY_INIT \
8849 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8850 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8851
8852 /*
8853 * Initialize the stackprotector canary value.
8854 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8855
8856 static inline void load_stack_canary_segment(void)
8857 {
8858 -#ifdef CONFIG_X86_32
8859 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8860 asm volatile ("mov %0, %%gs" : : "r" (0));
8861 #endif
8862 }
8863 diff -urNp linux-3.0.4/arch/x86/include/asm/stacktrace.h linux-3.0.4/arch/x86/include/asm/stacktrace.h
8864 --- linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
8865 +++ linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
8866 @@ -11,28 +11,20 @@
8867
8868 extern int kstack_depth_to_print;
8869
8870 -struct thread_info;
8871 +struct task_struct;
8872 struct stacktrace_ops;
8873
8874 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8875 - unsigned long *stack,
8876 - unsigned long bp,
8877 - const struct stacktrace_ops *ops,
8878 - void *data,
8879 - unsigned long *end,
8880 - int *graph);
8881 -
8882 -extern unsigned long
8883 -print_context_stack(struct thread_info *tinfo,
8884 - unsigned long *stack, unsigned long bp,
8885 - const struct stacktrace_ops *ops, void *data,
8886 - unsigned long *end, int *graph);
8887 -
8888 -extern unsigned long
8889 -print_context_stack_bp(struct thread_info *tinfo,
8890 - unsigned long *stack, unsigned long bp,
8891 - const struct stacktrace_ops *ops, void *data,
8892 - unsigned long *end, int *graph);
8893 +typedef unsigned long walk_stack_t(struct task_struct *task,
8894 + void *stack_start,
8895 + unsigned long *stack,
8896 + unsigned long bp,
8897 + const struct stacktrace_ops *ops,
8898 + void *data,
8899 + unsigned long *end,
8900 + int *graph);
8901 +
8902 +extern walk_stack_t print_context_stack;
8903 +extern walk_stack_t print_context_stack_bp;
8904
8905 /* Generic stack tracer with callbacks */
8906
8907 @@ -40,7 +32,7 @@ struct stacktrace_ops {
8908 void (*address)(void *data, unsigned long address, int reliable);
8909 /* On negative return stop dumping */
8910 int (*stack)(void *data, char *name);
8911 - walk_stack_t walk_stack;
8912 + walk_stack_t *walk_stack;
8913 };
8914
8915 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8916 diff -urNp linux-3.0.4/arch/x86/include/asm/system.h linux-3.0.4/arch/x86/include/asm/system.h
8917 --- linux-3.0.4/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
8918 +++ linux-3.0.4/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
8919 @@ -129,7 +129,7 @@ do { \
8920 "call __switch_to\n\t" \
8921 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8922 __switch_canary \
8923 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
8924 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8925 "movq %%rax,%%rdi\n\t" \
8926 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8927 "jnz ret_from_fork\n\t" \
8928 @@ -140,7 +140,7 @@ do { \
8929 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8930 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8931 [_tif_fork] "i" (_TIF_FORK), \
8932 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
8933 + [thread_info] "m" (current_tinfo), \
8934 [current_task] "m" (current_task) \
8935 __switch_canary_iparam \
8936 : "memory", "cc" __EXTRA_CLOBBER)
8937 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8938 {
8939 unsigned long __limit;
8940 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8941 - return __limit + 1;
8942 + return __limit;
8943 }
8944
8945 static inline void native_clts(void)
8946 @@ -397,12 +397,12 @@ void enable_hlt(void);
8947
8948 void cpu_idle_wait(void);
8949
8950 -extern unsigned long arch_align_stack(unsigned long sp);
8951 +#define arch_align_stack(x) ((x) & ~0xfUL)
8952 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8953
8954 void default_idle(void);
8955
8956 -void stop_this_cpu(void *dummy);
8957 +void stop_this_cpu(void *dummy) __noreturn;
8958
8959 /*
8960 * Force strict CPU ordering.
8961 diff -urNp linux-3.0.4/arch/x86/include/asm/thread_info.h linux-3.0.4/arch/x86/include/asm/thread_info.h
8962 --- linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
8963 +++ linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
8964 @@ -10,6 +10,7 @@
8965 #include <linux/compiler.h>
8966 #include <asm/page.h>
8967 #include <asm/types.h>
8968 +#include <asm/percpu.h>
8969
8970 /*
8971 * low level task data that entry.S needs immediate access to
8972 @@ -24,7 +25,6 @@ struct exec_domain;
8973 #include <asm/atomic.h>
8974
8975 struct thread_info {
8976 - struct task_struct *task; /* main task structure */
8977 struct exec_domain *exec_domain; /* execution domain */
8978 __u32 flags; /* low level flags */
8979 __u32 status; /* thread synchronous flags */
8980 @@ -34,18 +34,12 @@ struct thread_info {
8981 mm_segment_t addr_limit;
8982 struct restart_block restart_block;
8983 void __user *sysenter_return;
8984 -#ifdef CONFIG_X86_32
8985 - unsigned long previous_esp; /* ESP of the previous stack in
8986 - case of nested (IRQ) stacks
8987 - */
8988 - __u8 supervisor_stack[0];
8989 -#endif
8990 + unsigned long lowest_stack;
8991 int uaccess_err;
8992 };
8993
8994 -#define INIT_THREAD_INFO(tsk) \
8995 +#define INIT_THREAD_INFO \
8996 { \
8997 - .task = &tsk, \
8998 .exec_domain = &default_exec_domain, \
8999 .flags = 0, \
9000 .cpu = 0, \
9001 @@ -56,7 +50,7 @@ struct thread_info {
9002 }, \
9003 }
9004
9005 -#define init_thread_info (init_thread_union.thread_info)
9006 +#define init_thread_info (init_thread_union.stack)
9007 #define init_stack (init_thread_union.stack)
9008
9009 #else /* !__ASSEMBLY__ */
9010 @@ -170,6 +164,23 @@ struct thread_info {
9011 ret; \
9012 })
9013
9014 +#ifdef __ASSEMBLY__
9015 +/* how to get the thread information struct from ASM */
9016 +#define GET_THREAD_INFO(reg) \
9017 + mov PER_CPU_VAR(current_tinfo), reg
9018 +
9019 +/* use this one if reg already contains %esp */
9020 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9021 +#else
9022 +/* how to get the thread information struct from C */
9023 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9024 +
9025 +static __always_inline struct thread_info *current_thread_info(void)
9026 +{
9027 + return percpu_read_stable(current_tinfo);
9028 +}
9029 +#endif
9030 +
9031 #ifdef CONFIG_X86_32
9032
9033 #define STACK_WARN (THREAD_SIZE/8)
9034 @@ -180,35 +191,13 @@ struct thread_info {
9035 */
9036 #ifndef __ASSEMBLY__
9037
9038 -
9039 /* how to get the current stack pointer from C */
9040 register unsigned long current_stack_pointer asm("esp") __used;
9041
9042 -/* how to get the thread information struct from C */
9043 -static inline struct thread_info *current_thread_info(void)
9044 -{
9045 - return (struct thread_info *)
9046 - (current_stack_pointer & ~(THREAD_SIZE - 1));
9047 -}
9048 -
9049 -#else /* !__ASSEMBLY__ */
9050 -
9051 -/* how to get the thread information struct from ASM */
9052 -#define GET_THREAD_INFO(reg) \
9053 - movl $-THREAD_SIZE, reg; \
9054 - andl %esp, reg
9055 -
9056 -/* use this one if reg already contains %esp */
9057 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9058 - andl $-THREAD_SIZE, reg
9059 -
9060 #endif
9061
9062 #else /* X86_32 */
9063
9064 -#include <asm/percpu.h>
9065 -#define KERNEL_STACK_OFFSET (5*8)
9066 -
9067 /*
9068 * macros/functions for gaining access to the thread information structure
9069 * preempt_count needs to be 1 initially, until the scheduler is functional.
9070 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9071 #ifndef __ASSEMBLY__
9072 DECLARE_PER_CPU(unsigned long, kernel_stack);
9073
9074 -static inline struct thread_info *current_thread_info(void)
9075 -{
9076 - struct thread_info *ti;
9077 - ti = (void *)(percpu_read_stable(kernel_stack) +
9078 - KERNEL_STACK_OFFSET - THREAD_SIZE);
9079 - return ti;
9080 -}
9081 -
9082 -#else /* !__ASSEMBLY__ */
9083 -
9084 -/* how to get the thread information struct from ASM */
9085 -#define GET_THREAD_INFO(reg) \
9086 - movq PER_CPU_VAR(kernel_stack),reg ; \
9087 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9088 -
9089 +/* how to get the current stack pointer from C */
9090 +register unsigned long current_stack_pointer asm("rsp") __used;
9091 #endif
9092
9093 #endif /* !X86_32 */
9094 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9095 extern void free_thread_info(struct thread_info *ti);
9096 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9097 #define arch_task_cache_init arch_task_cache_init
9098 +
9099 +#define __HAVE_THREAD_FUNCTIONS
9100 +#define task_thread_info(task) (&(task)->tinfo)
9101 +#define task_stack_page(task) ((task)->stack)
9102 +#define setup_thread_stack(p, org) do {} while (0)
9103 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9104 +
9105 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9106 +extern struct task_struct *alloc_task_struct_node(int node);
9107 +extern void free_task_struct(struct task_struct *);
9108 +
9109 #endif
9110 #endif /* _ASM_X86_THREAD_INFO_H */
9111 diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_32.h linux-3.0.4/arch/x86/include/asm/uaccess_32.h
9112 --- linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9113 +++ linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9114 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9115 static __always_inline unsigned long __must_check
9116 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9117 {
9118 + pax_track_stack();
9119 +
9120 + if ((long)n < 0)
9121 + return n;
9122 +
9123 if (__builtin_constant_p(n)) {
9124 unsigned long ret;
9125
9126 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9127 return ret;
9128 }
9129 }
9130 + if (!__builtin_constant_p(n))
9131 + check_object_size(from, n, true);
9132 return __copy_to_user_ll(to, from, n);
9133 }
9134
9135 @@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9136 __copy_to_user(void __user *to, const void *from, unsigned long n)
9137 {
9138 might_fault();
9139 +
9140 return __copy_to_user_inatomic(to, from, n);
9141 }
9142
9143 static __always_inline unsigned long
9144 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9145 {
9146 + if ((long)n < 0)
9147 + return n;
9148 +
9149 /* Avoid zeroing the tail if the copy fails..
9150 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9151 * but as the zeroing behaviour is only significant when n is not
9152 @@ -137,6 +148,12 @@ static __always_inline unsigned long
9153 __copy_from_user(void *to, const void __user *from, unsigned long n)
9154 {
9155 might_fault();
9156 +
9157 + pax_track_stack();
9158 +
9159 + if ((long)n < 0)
9160 + return n;
9161 +
9162 if (__builtin_constant_p(n)) {
9163 unsigned long ret;
9164
9165 @@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9166 return ret;
9167 }
9168 }
9169 + if (!__builtin_constant_p(n))
9170 + check_object_size(to, n, false);
9171 return __copy_from_user_ll(to, from, n);
9172 }
9173
9174 @@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9175 const void __user *from, unsigned long n)
9176 {
9177 might_fault();
9178 +
9179 + if ((long)n < 0)
9180 + return n;
9181 +
9182 if (__builtin_constant_p(n)) {
9183 unsigned long ret;
9184
9185 @@ -181,15 +204,19 @@ static __always_inline unsigned long
9186 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9187 unsigned long n)
9188 {
9189 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9190 -}
9191 + if ((long)n < 0)
9192 + return n;
9193
9194 -unsigned long __must_check copy_to_user(void __user *to,
9195 - const void *from, unsigned long n);
9196 -unsigned long __must_check _copy_from_user(void *to,
9197 - const void __user *from,
9198 - unsigned long n);
9199 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9200 +}
9201
9202 +extern void copy_to_user_overflow(void)
9203 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9204 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9205 +#else
9206 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9207 +#endif
9208 +;
9209
9210 extern void copy_from_user_overflow(void)
9211 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9212 @@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9213 #endif
9214 ;
9215
9216 -static inline unsigned long __must_check copy_from_user(void *to,
9217 - const void __user *from,
9218 - unsigned long n)
9219 +/**
9220 + * copy_to_user: - Copy a block of data into user space.
9221 + * @to: Destination address, in user space.
9222 + * @from: Source address, in kernel space.
9223 + * @n: Number of bytes to copy.
9224 + *
9225 + * Context: User context only. This function may sleep.
9226 + *
9227 + * Copy data from kernel space to user space.
9228 + *
9229 + * Returns number of bytes that could not be copied.
9230 + * On success, this will be zero.
9231 + */
9232 +static inline unsigned long __must_check
9233 +copy_to_user(void __user *to, const void *from, unsigned long n)
9234 +{
9235 + int sz = __compiletime_object_size(from);
9236 +
9237 + if (unlikely(sz != -1 && sz < n))
9238 + copy_to_user_overflow();
9239 + else if (access_ok(VERIFY_WRITE, to, n))
9240 + n = __copy_to_user(to, from, n);
9241 + return n;
9242 +}
9243 +
9244 +/**
9245 + * copy_from_user: - Copy a block of data from user space.
9246 + * @to: Destination address, in kernel space.
9247 + * @from: Source address, in user space.
9248 + * @n: Number of bytes to copy.
9249 + *
9250 + * Context: User context only. This function may sleep.
9251 + *
9252 + * Copy data from user space to kernel space.
9253 + *
9254 + * Returns number of bytes that could not be copied.
9255 + * On success, this will be zero.
9256 + *
9257 + * If some data could not be copied, this function will pad the copied
9258 + * data to the requested size using zero bytes.
9259 + */
9260 +static inline unsigned long __must_check
9261 +copy_from_user(void *to, const void __user *from, unsigned long n)
9262 {
9263 int sz = __compiletime_object_size(to);
9264
9265 - if (likely(sz == -1 || sz >= n))
9266 - n = _copy_from_user(to, from, n);
9267 - else
9268 + if (unlikely(sz != -1 && sz < n))
9269 copy_from_user_overflow();
9270 -
9271 + else if (access_ok(VERIFY_READ, from, n))
9272 + n = __copy_from_user(to, from, n);
9273 + else if ((long)n > 0) {
9274 + if (!__builtin_constant_p(n))
9275 + check_object_size(to, n, false);
9276 + memset(to, 0, n);
9277 + }
9278 return n;
9279 }
9280
9281 diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_64.h linux-3.0.4/arch/x86/include/asm/uaccess_64.h
9282 --- linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9283 +++ linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-08-23 21:48:14.000000000 -0400
9284 @@ -10,6 +10,9 @@
9285 #include <asm/alternative.h>
9286 #include <asm/cpufeature.h>
9287 #include <asm/page.h>
9288 +#include <asm/pgtable.h>
9289 +
9290 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9291
9292 /*
9293 * Copy To/From Userspace
9294 @@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9295 return ret;
9296 }
9297
9298 -__must_check unsigned long
9299 -_copy_to_user(void __user *to, const void *from, unsigned len);
9300 -__must_check unsigned long
9301 -_copy_from_user(void *to, const void __user *from, unsigned len);
9302 +static __always_inline __must_check unsigned long
9303 +__copy_to_user(void __user *to, const void *from, unsigned len);
9304 +static __always_inline __must_check unsigned long
9305 +__copy_from_user(void *to, const void __user *from, unsigned len);
9306 __must_check unsigned long
9307 copy_in_user(void __user *to, const void __user *from, unsigned len);
9308
9309 static inline unsigned long __must_check copy_from_user(void *to,
9310 const void __user *from,
9311 - unsigned long n)
9312 + unsigned n)
9313 {
9314 - int sz = __compiletime_object_size(to);
9315 -
9316 might_fault();
9317 - if (likely(sz == -1 || sz >= n))
9318 - n = _copy_from_user(to, from, n);
9319 -#ifdef CONFIG_DEBUG_VM
9320 - else
9321 - WARN(1, "Buffer overflow detected!\n");
9322 -#endif
9323 +
9324 + if (access_ok(VERIFY_READ, from, n))
9325 + n = __copy_from_user(to, from, n);
9326 + else if ((int)n > 0) {
9327 + if (!__builtin_constant_p(n))
9328 + check_object_size(to, n, false);
9329 + memset(to, 0, n);
9330 + }
9331 return n;
9332 }
9333
9334 @@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9335 {
9336 might_fault();
9337
9338 - return _copy_to_user(dst, src, size);
9339 + if (access_ok(VERIFY_WRITE, dst, size))
9340 + size = __copy_to_user(dst, src, size);
9341 + return size;
9342 }
9343
9344 static __always_inline __must_check
9345 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9346 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9347 {
9348 - int ret = 0;
9349 + int sz = __compiletime_object_size(dst);
9350 + unsigned ret = 0;
9351
9352 might_fault();
9353 - if (!__builtin_constant_p(size))
9354 - return copy_user_generic(dst, (__force void *)src, size);
9355 +
9356 + pax_track_stack();
9357 +
9358 + if ((int)size < 0)
9359 + return size;
9360 +
9361 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9362 + if (!__access_ok(VERIFY_READ, src, size))
9363 + return size;
9364 +#endif
9365 +
9366 + if (unlikely(sz != -1 && sz < size)) {
9367 +#ifdef CONFIG_DEBUG_VM
9368 + WARN(1, "Buffer overflow detected!\n");
9369 +#endif
9370 + return size;
9371 + }
9372 +
9373 + if (!__builtin_constant_p(size)) {
9374 + check_object_size(dst, size, false);
9375 +
9376 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9377 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9378 + src += PAX_USER_SHADOW_BASE;
9379 +#endif
9380 +
9381 + return copy_user_generic(dst, (__force const void *)src, size);
9382 + }
9383 switch (size) {
9384 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9385 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9386 ret, "b", "b", "=q", 1);
9387 return ret;
9388 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9389 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9390 ret, "w", "w", "=r", 2);
9391 return ret;
9392 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9393 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9394 ret, "l", "k", "=r", 4);
9395 return ret;
9396 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9397 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9398 ret, "q", "", "=r", 8);
9399 return ret;
9400 case 10:
9401 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9402 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9403 ret, "q", "", "=r", 10);
9404 if (unlikely(ret))
9405 return ret;
9406 __get_user_asm(*(u16 *)(8 + (char *)dst),
9407 - (u16 __user *)(8 + (char __user *)src),
9408 + (const u16 __user *)(8 + (const char __user *)src),
9409 ret, "w", "w", "=r", 2);
9410 return ret;
9411 case 16:
9412 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9413 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9414 ret, "q", "", "=r", 16);
9415 if (unlikely(ret))
9416 return ret;
9417 __get_user_asm(*(u64 *)(8 + (char *)dst),
9418 - (u64 __user *)(8 + (char __user *)src),
9419 + (const u64 __user *)(8 + (const char __user *)src),
9420 ret, "q", "", "=r", 8);
9421 return ret;
9422 default:
9423 - return copy_user_generic(dst, (__force void *)src, size);
9424 +
9425 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9426 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9427 + src += PAX_USER_SHADOW_BASE;
9428 +#endif
9429 +
9430 + return copy_user_generic(dst, (__force const void *)src, size);
9431 }
9432 }
9433
9434 static __always_inline __must_check
9435 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9436 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9437 {
9438 - int ret = 0;
9439 + int sz = __compiletime_object_size(src);
9440 + unsigned ret = 0;
9441
9442 might_fault();
9443 - if (!__builtin_constant_p(size))
9444 +
9445 + pax_track_stack();
9446 +
9447 + if ((int)size < 0)
9448 + return size;
9449 +
9450 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9451 + if (!__access_ok(VERIFY_WRITE, dst, size))
9452 + return size;
9453 +#endif
9454 +
9455 + if (unlikely(sz != -1 && sz < size)) {
9456 +#ifdef CONFIG_DEBUG_VM
9457 + WARN(1, "Buffer overflow detected!\n");
9458 +#endif
9459 + return size;
9460 + }
9461 +
9462 + if (!__builtin_constant_p(size)) {
9463 + check_object_size(src, size, true);
9464 +
9465 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9466 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9467 + dst += PAX_USER_SHADOW_BASE;
9468 +#endif
9469 +
9470 return copy_user_generic((__force void *)dst, src, size);
9471 + }
9472 switch (size) {
9473 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9474 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9475 ret, "b", "b", "iq", 1);
9476 return ret;
9477 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9478 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9479 ret, "w", "w", "ir", 2);
9480 return ret;
9481 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9482 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9483 ret, "l", "k", "ir", 4);
9484 return ret;
9485 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9486 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9487 ret, "q", "", "er", 8);
9488 return ret;
9489 case 10:
9490 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9491 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9492 ret, "q", "", "er", 10);
9493 if (unlikely(ret))
9494 return ret;
9495 asm("":::"memory");
9496 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9497 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9498 ret, "w", "w", "ir", 2);
9499 return ret;
9500 case 16:
9501 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9502 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9503 ret, "q", "", "er", 16);
9504 if (unlikely(ret))
9505 return ret;
9506 asm("":::"memory");
9507 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9508 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9509 ret, "q", "", "er", 8);
9510 return ret;
9511 default:
9512 +
9513 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9514 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9515 + dst += PAX_USER_SHADOW_BASE;
9516 +#endif
9517 +
9518 return copy_user_generic((__force void *)dst, src, size);
9519 }
9520 }
9521
9522 static __always_inline __must_check
9523 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9524 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9525 {
9526 - int ret = 0;
9527 + unsigned ret = 0;
9528
9529 might_fault();
9530 - if (!__builtin_constant_p(size))
9531 +
9532 + if ((int)size < 0)
9533 + return size;
9534 +
9535 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9536 + if (!__access_ok(VERIFY_READ, src, size))
9537 + return size;
9538 + if (!__access_ok(VERIFY_WRITE, dst, size))
9539 + return size;
9540 +#endif
9541 +
9542 + if (!__builtin_constant_p(size)) {
9543 +
9544 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9545 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9546 + src += PAX_USER_SHADOW_BASE;
9547 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9548 + dst += PAX_USER_SHADOW_BASE;
9549 +#endif
9550 +
9551 return copy_user_generic((__force void *)dst,
9552 - (__force void *)src, size);
9553 + (__force const void *)src, size);
9554 + }
9555 switch (size) {
9556 case 1: {
9557 u8 tmp;
9558 - __get_user_asm(tmp, (u8 __user *)src,
9559 + __get_user_asm(tmp, (const u8 __user *)src,
9560 ret, "b", "b", "=q", 1);
9561 if (likely(!ret))
9562 __put_user_asm(tmp, (u8 __user *)dst,
9563 @@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9564 }
9565 case 2: {
9566 u16 tmp;
9567 - __get_user_asm(tmp, (u16 __user *)src,
9568 + __get_user_asm(tmp, (const u16 __user *)src,
9569 ret, "w", "w", "=r", 2);
9570 if (likely(!ret))
9571 __put_user_asm(tmp, (u16 __user *)dst,
9572 @@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9573
9574 case 4: {
9575 u32 tmp;
9576 - __get_user_asm(tmp, (u32 __user *)src,
9577 + __get_user_asm(tmp, (const u32 __user *)src,
9578 ret, "l", "k", "=r", 4);
9579 if (likely(!ret))
9580 __put_user_asm(tmp, (u32 __user *)dst,
9581 @@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9582 }
9583 case 8: {
9584 u64 tmp;
9585 - __get_user_asm(tmp, (u64 __user *)src,
9586 + __get_user_asm(tmp, (const u64 __user *)src,
9587 ret, "q", "", "=r", 8);
9588 if (likely(!ret))
9589 __put_user_asm(tmp, (u64 __user *)dst,
9590 @@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9591 return ret;
9592 }
9593 default:
9594 +
9595 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9596 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9597 + src += PAX_USER_SHADOW_BASE;
9598 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9599 + dst += PAX_USER_SHADOW_BASE;
9600 +#endif
9601 +
9602 return copy_user_generic((__force void *)dst,
9603 - (__force void *)src, size);
9604 + (__force const void *)src, size);
9605 }
9606 }
9607
9608 @@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9609 static __must_check __always_inline int
9610 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9611 {
9612 + pax_track_stack();
9613 +
9614 + if ((int)size < 0)
9615 + return size;
9616 +
9617 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9618 + if (!__access_ok(VERIFY_READ, src, size))
9619 + return size;
9620 +
9621 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9622 + src += PAX_USER_SHADOW_BASE;
9623 +#endif
9624 +
9625 return copy_user_generic(dst, (__force const void *)src, size);
9626 }
9627
9628 -static __must_check __always_inline int
9629 +static __must_check __always_inline unsigned long
9630 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9631 {
9632 + if ((int)size < 0)
9633 + return size;
9634 +
9635 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9636 + if (!__access_ok(VERIFY_WRITE, dst, size))
9637 + return size;
9638 +
9639 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9640 + dst += PAX_USER_SHADOW_BASE;
9641 +#endif
9642 +
9643 return copy_user_generic((__force void *)dst, src, size);
9644 }
9645
9646 -extern long __copy_user_nocache(void *dst, const void __user *src,
9647 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9648 unsigned size, int zerorest);
9649
9650 -static inline int
9651 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9652 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9653 {
9654 might_sleep();
9655 +
9656 + if ((int)size < 0)
9657 + return size;
9658 +
9659 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9660 + if (!__access_ok(VERIFY_READ, src, size))
9661 + return size;
9662 +#endif
9663 +
9664 return __copy_user_nocache(dst, src, size, 1);
9665 }
9666
9667 -static inline int
9668 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9669 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9670 unsigned size)
9671 {
9672 + if ((int)size < 0)
9673 + return size;
9674 +
9675 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9676 + if (!__access_ok(VERIFY_READ, src, size))
9677 + return size;
9678 +#endif
9679 +
9680 return __copy_user_nocache(dst, src, size, 0);
9681 }
9682
9683 -unsigned long
9684 +extern unsigned long
9685 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9686
9687 #endif /* _ASM_X86_UACCESS_64_H */
9688 diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess.h linux-3.0.4/arch/x86/include/asm/uaccess.h
9689 --- linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9690 +++ linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
9691 @@ -7,12 +7,15 @@
9692 #include <linux/compiler.h>
9693 #include <linux/thread_info.h>
9694 #include <linux/string.h>
9695 +#include <linux/sched.h>
9696 #include <asm/asm.h>
9697 #include <asm/page.h>
9698
9699 #define VERIFY_READ 0
9700 #define VERIFY_WRITE 1
9701
9702 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9703 +
9704 /*
9705 * The fs value determines whether argument validity checking should be
9706 * performed or not. If get_fs() == USER_DS, checking is performed, with
9707 @@ -28,7 +31,12 @@
9708
9709 #define get_ds() (KERNEL_DS)
9710 #define get_fs() (current_thread_info()->addr_limit)
9711 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9712 +void __set_fs(mm_segment_t x);
9713 +void set_fs(mm_segment_t x);
9714 +#else
9715 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9716 +#endif
9717
9718 #define segment_eq(a, b) ((a).seg == (b).seg)
9719
9720 @@ -76,7 +84,33 @@
9721 * checks that the pointer is in the user space range - after calling
9722 * this function, memory access functions may still return -EFAULT.
9723 */
9724 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9725 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9726 +#define access_ok(type, addr, size) \
9727 +({ \
9728 + long __size = size; \
9729 + unsigned long __addr = (unsigned long)addr; \
9730 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9731 + unsigned long __end_ao = __addr + __size - 1; \
9732 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9733 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9734 + while(__addr_ao <= __end_ao) { \
9735 + char __c_ao; \
9736 + __addr_ao += PAGE_SIZE; \
9737 + if (__size > PAGE_SIZE) \
9738 + cond_resched(); \
9739 + if (__get_user(__c_ao, (char __user *)__addr)) \
9740 + break; \
9741 + if (type != VERIFY_WRITE) { \
9742 + __addr = __addr_ao; \
9743 + continue; \
9744 + } \
9745 + if (__put_user(__c_ao, (char __user *)__addr)) \
9746 + break; \
9747 + __addr = __addr_ao; \
9748 + } \
9749 + } \
9750 + __ret_ao; \
9751 +})
9752
9753 /*
9754 * The exception table consists of pairs of addresses: the first is the
9755 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9756 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9757 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9758
9759 -
9760 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9761 +#define __copyuser_seg "gs;"
9762 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9763 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9764 +#else
9765 +#define __copyuser_seg
9766 +#define __COPYUSER_SET_ES
9767 +#define __COPYUSER_RESTORE_ES
9768 +#endif
9769
9770 #ifdef CONFIG_X86_32
9771 #define __put_user_asm_u64(x, addr, err, errret) \
9772 - asm volatile("1: movl %%eax,0(%2)\n" \
9773 - "2: movl %%edx,4(%2)\n" \
9774 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9775 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9776 "3:\n" \
9777 ".section .fixup,\"ax\"\n" \
9778 "4: movl %3,%0\n" \
9779 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9780 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9781
9782 #define __put_user_asm_ex_u64(x, addr) \
9783 - asm volatile("1: movl %%eax,0(%1)\n" \
9784 - "2: movl %%edx,4(%1)\n" \
9785 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9786 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9787 "3:\n" \
9788 _ASM_EXTABLE(1b, 2b - 1b) \
9789 _ASM_EXTABLE(2b, 3b - 2b) \
9790 @@ -373,7 +415,7 @@ do { \
9791 } while (0)
9792
9793 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9794 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9795 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9796 "2:\n" \
9797 ".section .fixup,\"ax\"\n" \
9798 "3: mov %3,%0\n" \
9799 @@ -381,7 +423,7 @@ do { \
9800 " jmp 2b\n" \
9801 ".previous\n" \
9802 _ASM_EXTABLE(1b, 3b) \
9803 - : "=r" (err), ltype(x) \
9804 + : "=r" (err), ltype (x) \
9805 : "m" (__m(addr)), "i" (errret), "0" (err))
9806
9807 #define __get_user_size_ex(x, ptr, size) \
9808 @@ -406,7 +448,7 @@ do { \
9809 } while (0)
9810
9811 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9812 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9813 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9814 "2:\n" \
9815 _ASM_EXTABLE(1b, 2b - 1b) \
9816 : ltype(x) : "m" (__m(addr)))
9817 @@ -423,13 +465,24 @@ do { \
9818 int __gu_err; \
9819 unsigned long __gu_val; \
9820 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9821 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
9822 + (x) = (__typeof__(*(ptr)))__gu_val; \
9823 __gu_err; \
9824 })
9825
9826 /* FIXME: this hack is definitely wrong -AK */
9827 struct __large_struct { unsigned long buf[100]; };
9828 -#define __m(x) (*(struct __large_struct __user *)(x))
9829 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9830 +#define ____m(x) \
9831 +({ \
9832 + unsigned long ____x = (unsigned long)(x); \
9833 + if (____x < PAX_USER_SHADOW_BASE) \
9834 + ____x += PAX_USER_SHADOW_BASE; \
9835 + (void __user *)____x; \
9836 +})
9837 +#else
9838 +#define ____m(x) (x)
9839 +#endif
9840 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9841
9842 /*
9843 * Tell gcc we read from memory instead of writing: this is because
9844 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
9845 * aliasing issues.
9846 */
9847 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9848 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9849 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9850 "2:\n" \
9851 ".section .fixup,\"ax\"\n" \
9852 "3: mov %3,%0\n" \
9853 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
9854 ".previous\n" \
9855 _ASM_EXTABLE(1b, 3b) \
9856 : "=r"(err) \
9857 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9858 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9859
9860 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9861 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9862 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9863 "2:\n" \
9864 _ASM_EXTABLE(1b, 2b - 1b) \
9865 : : ltype(x), "m" (__m(addr)))
9866 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
9867 * On error, the variable @x is set to zero.
9868 */
9869
9870 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9871 +#define __get_user(x, ptr) get_user((x), (ptr))
9872 +#else
9873 #define __get_user(x, ptr) \
9874 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9875 +#endif
9876
9877 /**
9878 * __put_user: - Write a simple value into user space, with less checking.
9879 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
9880 * Returns zero on success, or -EFAULT on error.
9881 */
9882
9883 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9884 +#define __put_user(x, ptr) put_user((x), (ptr))
9885 +#else
9886 #define __put_user(x, ptr) \
9887 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9888 +#endif
9889
9890 #define __get_user_unaligned __get_user
9891 #define __put_user_unaligned __put_user
9892 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
9893 #define get_user_ex(x, ptr) do { \
9894 unsigned long __gue_val; \
9895 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9896 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
9897 + (x) = (__typeof__(*(ptr)))__gue_val; \
9898 } while (0)
9899
9900 #ifdef CONFIG_X86_WP_WORKS_OK
9901 diff -urNp linux-3.0.4/arch/x86/include/asm/vgtod.h linux-3.0.4/arch/x86/include/asm/vgtod.h
9902 --- linux-3.0.4/arch/x86/include/asm/vgtod.h 2011-07-21 22:17:23.000000000 -0400
9903 +++ linux-3.0.4/arch/x86/include/asm/vgtod.h 2011-08-23 21:47:55.000000000 -0400
9904 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9905 int sysctl_enabled;
9906 struct timezone sys_tz;
9907 struct { /* extract of a clocksource struct */
9908 + char name[8];
9909 cycle_t (*vread)(void);
9910 cycle_t cycle_last;
9911 cycle_t mask;
9912 diff -urNp linux-3.0.4/arch/x86/include/asm/x86_init.h linux-3.0.4/arch/x86/include/asm/x86_init.h
9913 --- linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
9914 +++ linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
9915 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
9916 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9917 void (*find_smp_config)(void);
9918 void (*get_smp_config)(unsigned int early);
9919 -};
9920 +} __no_const;
9921
9922 /**
9923 * struct x86_init_resources - platform specific resource related ops
9924 @@ -42,7 +42,7 @@ struct x86_init_resources {
9925 void (*probe_roms)(void);
9926 void (*reserve_resources)(void);
9927 char *(*memory_setup)(void);
9928 -};
9929 +} __no_const;
9930
9931 /**
9932 * struct x86_init_irqs - platform specific interrupt setup
9933 @@ -55,7 +55,7 @@ struct x86_init_irqs {
9934 void (*pre_vector_init)(void);
9935 void (*intr_init)(void);
9936 void (*trap_init)(void);
9937 -};
9938 +} __no_const;
9939
9940 /**
9941 * struct x86_init_oem - oem platform specific customizing functions
9942 @@ -65,7 +65,7 @@ struct x86_init_irqs {
9943 struct x86_init_oem {
9944 void (*arch_setup)(void);
9945 void (*banner)(void);
9946 -};
9947 +} __no_const;
9948
9949 /**
9950 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9951 @@ -76,7 +76,7 @@ struct x86_init_oem {
9952 */
9953 struct x86_init_mapping {
9954 void (*pagetable_reserve)(u64 start, u64 end);
9955 -};
9956 +} __no_const;
9957
9958 /**
9959 * struct x86_init_paging - platform specific paging functions
9960 @@ -86,7 +86,7 @@ struct x86_init_mapping {
9961 struct x86_init_paging {
9962 void (*pagetable_setup_start)(pgd_t *base);
9963 void (*pagetable_setup_done)(pgd_t *base);
9964 -};
9965 +} __no_const;
9966
9967 /**
9968 * struct x86_init_timers - platform specific timer setup
9969 @@ -101,7 +101,7 @@ struct x86_init_timers {
9970 void (*tsc_pre_init)(void);
9971 void (*timer_init)(void);
9972 void (*wallclock_init)(void);
9973 -};
9974 +} __no_const;
9975
9976 /**
9977 * struct x86_init_iommu - platform specific iommu setup
9978 @@ -109,7 +109,7 @@ struct x86_init_timers {
9979 */
9980 struct x86_init_iommu {
9981 int (*iommu_init)(void);
9982 -};
9983 +} __no_const;
9984
9985 /**
9986 * struct x86_init_pci - platform specific pci init functions
9987 @@ -123,7 +123,7 @@ struct x86_init_pci {
9988 int (*init)(void);
9989 void (*init_irq)(void);
9990 void (*fixup_irqs)(void);
9991 -};
9992 +} __no_const;
9993
9994 /**
9995 * struct x86_init_ops - functions for platform specific setup
9996 @@ -139,7 +139,7 @@ struct x86_init_ops {
9997 struct x86_init_timers timers;
9998 struct x86_init_iommu iommu;
9999 struct x86_init_pci pci;
10000 -};
10001 +} __no_const;
10002
10003 /**
10004 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10005 @@ -147,7 +147,7 @@ struct x86_init_ops {
10006 */
10007 struct x86_cpuinit_ops {
10008 void (*setup_percpu_clockev)(void);
10009 -};
10010 +} __no_const;
10011
10012 /**
10013 * struct x86_platform_ops - platform specific runtime functions
10014 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10015 bool (*is_untracked_pat_range)(u64 start, u64 end);
10016 void (*nmi_init)(void);
10017 int (*i8042_detect)(void);
10018 -};
10019 +} __no_const;
10020
10021 struct pci_dev;
10022
10023 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10024 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10025 void (*teardown_msi_irq)(unsigned int irq);
10026 void (*teardown_msi_irqs)(struct pci_dev *dev);
10027 -};
10028 +} __no_const;
10029
10030 extern struct x86_init_ops x86_init;
10031 extern struct x86_cpuinit_ops x86_cpuinit;
10032 diff -urNp linux-3.0.4/arch/x86/include/asm/xsave.h linux-3.0.4/arch/x86/include/asm/xsave.h
10033 --- linux-3.0.4/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10034 +++ linux-3.0.4/arch/x86/include/asm/xsave.h 2011-08-23 21:47:55.000000000 -0400
10035 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10036 {
10037 int err;
10038
10039 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10040 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10041 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10042 +#endif
10043 +
10044 /*
10045 * Clear the xsave header first, so that reserved fields are
10046 * initialized to zero.
10047 @@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10048 u32 lmask = mask;
10049 u32 hmask = mask >> 32;
10050
10051 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10052 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10053 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10054 +#endif
10055 +
10056 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10057 "2:\n"
10058 ".section .fixup,\"ax\"\n"
10059 diff -urNp linux-3.0.4/arch/x86/Kconfig linux-3.0.4/arch/x86/Kconfig
10060 --- linux-3.0.4/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10061 +++ linux-3.0.4/arch/x86/Kconfig 2011-08-23 21:48:14.000000000 -0400
10062 @@ -229,7 +229,7 @@ config X86_HT
10063
10064 config X86_32_LAZY_GS
10065 def_bool y
10066 - depends on X86_32 && !CC_STACKPROTECTOR
10067 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10068
10069 config ARCH_HWEIGHT_CFLAGS
10070 string
10071 @@ -1018,7 +1018,7 @@ choice
10072
10073 config NOHIGHMEM
10074 bool "off"
10075 - depends on !X86_NUMAQ
10076 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10077 ---help---
10078 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10079 However, the address space of 32-bit x86 processors is only 4
10080 @@ -1055,7 +1055,7 @@ config NOHIGHMEM
10081
10082 config HIGHMEM4G
10083 bool "4GB"
10084 - depends on !X86_NUMAQ
10085 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10086 ---help---
10087 Select this if you have a 32-bit processor and between 1 and 4
10088 gigabytes of physical RAM.
10089 @@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10090 hex
10091 default 0xB0000000 if VMSPLIT_3G_OPT
10092 default 0x80000000 if VMSPLIT_2G
10093 - default 0x78000000 if VMSPLIT_2G_OPT
10094 + default 0x70000000 if VMSPLIT_2G_OPT
10095 default 0x40000000 if VMSPLIT_1G
10096 default 0xC0000000
10097 depends on X86_32
10098 @@ -1453,7 +1453,7 @@ config ARCH_USES_PG_UNCACHED
10099
10100 config EFI
10101 bool "EFI runtime service support"
10102 - depends on ACPI
10103 + depends on ACPI && !PAX_KERNEXEC
10104 ---help---
10105 This enables the kernel to use EFI runtime services that are
10106 available (such as the EFI variable services).
10107 @@ -1483,6 +1483,7 @@ config SECCOMP
10108
10109 config CC_STACKPROTECTOR
10110 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10111 + depends on X86_64 || !PAX_MEMORY_UDEREF
10112 ---help---
10113 This option turns on the -fstack-protector GCC feature. This
10114 feature puts, at the beginning of functions, a canary value on
10115 @@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10116 config PHYSICAL_START
10117 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10118 default "0x1000000"
10119 + range 0x400000 0x40000000
10120 ---help---
10121 This gives the physical address where the kernel is loaded.
10122
10123 @@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10124 config PHYSICAL_ALIGN
10125 hex "Alignment value to which kernel should be aligned" if X86_32
10126 default "0x1000000"
10127 + range 0x400000 0x1000000 if PAX_KERNEXEC
10128 range 0x2000 0x1000000
10129 ---help---
10130 This value puts the alignment restrictions on physical address
10131 @@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10132 Say N if you want to disable CPU hotplug.
10133
10134 config COMPAT_VDSO
10135 - def_bool y
10136 + def_bool n
10137 prompt "Compat VDSO support"
10138 depends on X86_32 || IA32_EMULATION
10139 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10140 ---help---
10141 Map the 32-bit VDSO to the predictable old-style address too.
10142
10143 diff -urNp linux-3.0.4/arch/x86/Kconfig.cpu linux-3.0.4/arch/x86/Kconfig.cpu
10144 --- linux-3.0.4/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10145 +++ linux-3.0.4/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10146 @@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10147
10148 config X86_F00F_BUG
10149 def_bool y
10150 - depends on M586MMX || M586TSC || M586 || M486 || M386
10151 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10152
10153 config X86_INVD_BUG
10154 def_bool y
10155 @@ -362,7 +362,7 @@ config X86_POPAD_OK
10156
10157 config X86_ALIGNMENT_16
10158 def_bool y
10159 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10160 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10161
10162 config X86_INTEL_USERCOPY
10163 def_bool y
10164 @@ -408,7 +408,7 @@ config X86_CMPXCHG64
10165 # generates cmov.
10166 config X86_CMOV
10167 def_bool y
10168 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10169 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10170
10171 config X86_MINIMUM_CPU_FAMILY
10172 int
10173 diff -urNp linux-3.0.4/arch/x86/Kconfig.debug linux-3.0.4/arch/x86/Kconfig.debug
10174 --- linux-3.0.4/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10175 +++ linux-3.0.4/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10176 @@ -81,7 +81,7 @@ config X86_PTDUMP
10177 config DEBUG_RODATA
10178 bool "Write protect kernel read-only data structures"
10179 default y
10180 - depends on DEBUG_KERNEL
10181 + depends on DEBUG_KERNEL && BROKEN
10182 ---help---
10183 Mark the kernel read-only data as write-protected in the pagetables,
10184 in order to catch accidental (and incorrect) writes to such const
10185 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10186
10187 config DEBUG_SET_MODULE_RONX
10188 bool "Set loadable kernel module data as NX and text as RO"
10189 - depends on MODULES
10190 + depends on MODULES && BROKEN
10191 ---help---
10192 This option helps catch unintended modifications to loadable
10193 kernel module's text and read-only data. It also prevents execution
10194 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile
10195 --- linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10196 +++ linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10197 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10198 $(call cc-option, -fno-stack-protector) \
10199 $(call cc-option, -mpreferred-stack-boundary=2)
10200 KBUILD_CFLAGS += $(call cc-option, -m32)
10201 +ifdef CONSTIFY_PLUGIN
10202 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10203 +endif
10204 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10205 GCOV_PROFILE := n
10206
10207 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S
10208 --- linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10209 +++ linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10210 @@ -108,6 +108,9 @@ wakeup_code:
10211 /* Do any other stuff... */
10212
10213 #ifndef CONFIG_64BIT
10214 + /* Recheck NX bit overrides (64bit path does this in trampoline */
10215 + call verify_cpu
10216 +
10217 /* This could also be done in C code... */
10218 movl pmode_cr3, %eax
10219 movl %eax, %cr3
10220 @@ -131,6 +134,7 @@ wakeup_code:
10221 movl pmode_cr0, %eax
10222 movl %eax, %cr0
10223 jmp pmode_return
10224 +# include "../../verify_cpu.S"
10225 #else
10226 pushw $0
10227 pushw trampoline_segment
10228 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/sleep.c linux-3.0.4/arch/x86/kernel/acpi/sleep.c
10229 --- linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10230 +++ linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10231 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10232 header->trampoline_segment = trampoline_address() >> 4;
10233 #ifdef CONFIG_SMP
10234 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10235 +
10236 + pax_open_kernel();
10237 early_gdt_descr.address =
10238 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10239 + pax_close_kernel();
10240 +
10241 initial_gs = per_cpu_offset(smp_processor_id());
10242 #endif
10243 initial_code = (unsigned long)wakeup_long64;
10244 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S
10245 --- linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10246 +++ linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10247 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10248 # and restore the stack ... but you need gdt for this to work
10249 movl saved_context_esp, %esp
10250
10251 - movl %cs:saved_magic, %eax
10252 - cmpl $0x12345678, %eax
10253 + cmpl $0x12345678, saved_magic
10254 jne bogus_magic
10255
10256 # jump to place where we left off
10257 - movl saved_eip, %eax
10258 - jmp *%eax
10259 + jmp *(saved_eip)
10260
10261 bogus_magic:
10262 jmp bogus_magic
10263 diff -urNp linux-3.0.4/arch/x86/kernel/alternative.c linux-3.0.4/arch/x86/kernel/alternative.c
10264 --- linux-3.0.4/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10265 +++ linux-3.0.4/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10266 @@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10267 if (!*poff || ptr < text || ptr >= text_end)
10268 continue;
10269 /* turn DS segment override prefix into lock prefix */
10270 - if (*ptr == 0x3e)
10271 + if (*ktla_ktva(ptr) == 0x3e)
10272 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10273 };
10274 mutex_unlock(&text_mutex);
10275 @@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10276 if (!*poff || ptr < text || ptr >= text_end)
10277 continue;
10278 /* turn lock prefix into DS segment override prefix */
10279 - if (*ptr == 0xf0)
10280 + if (*ktla_ktva(ptr) == 0xf0)
10281 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10282 };
10283 mutex_unlock(&text_mutex);
10284 @@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10285
10286 BUG_ON(p->len > MAX_PATCH_LEN);
10287 /* prep the buffer with the original instructions */
10288 - memcpy(insnbuf, p->instr, p->len);
10289 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10290 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10291 (unsigned long)p->instr, p->len);
10292
10293 @@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10294 if (smp_alt_once)
10295 free_init_pages("SMP alternatives",
10296 (unsigned long)__smp_locks,
10297 - (unsigned long)__smp_locks_end);
10298 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10299
10300 restart_nmi();
10301 }
10302 @@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10303 * instructions. And on the local CPU you need to be protected again NMI or MCE
10304 * handlers seeing an inconsistent instruction while you patch.
10305 */
10306 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10307 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10308 size_t len)
10309 {
10310 unsigned long flags;
10311 local_irq_save(flags);
10312 - memcpy(addr, opcode, len);
10313 +
10314 + pax_open_kernel();
10315 + memcpy(ktla_ktva(addr), opcode, len);
10316 sync_core();
10317 + pax_close_kernel();
10318 +
10319 local_irq_restore(flags);
10320 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10321 that causes hangs on some VIA CPUs. */
10322 @@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10323 */
10324 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10325 {
10326 - unsigned long flags;
10327 - char *vaddr;
10328 + unsigned char *vaddr = ktla_ktva(addr);
10329 struct page *pages[2];
10330 - int i;
10331 + size_t i;
10332
10333 if (!core_kernel_text((unsigned long)addr)) {
10334 - pages[0] = vmalloc_to_page(addr);
10335 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10336 + pages[0] = vmalloc_to_page(vaddr);
10337 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10338 } else {
10339 - pages[0] = virt_to_page(addr);
10340 + pages[0] = virt_to_page(vaddr);
10341 WARN_ON(!PageReserved(pages[0]));
10342 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10343 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10344 }
10345 BUG_ON(!pages[0]);
10346 - local_irq_save(flags);
10347 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10348 - if (pages[1])
10349 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10350 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10351 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10352 - clear_fixmap(FIX_TEXT_POKE0);
10353 - if (pages[1])
10354 - clear_fixmap(FIX_TEXT_POKE1);
10355 - local_flush_tlb();
10356 - sync_core();
10357 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10358 - that causes hangs on some VIA CPUs. */
10359 + text_poke_early(addr, opcode, len);
10360 for (i = 0; i < len; i++)
10361 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10362 - local_irq_restore(flags);
10363 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10364 return addr;
10365 }
10366
10367 diff -urNp linux-3.0.4/arch/x86/kernel/apic/apic.c linux-3.0.4/arch/x86/kernel/apic/apic.c
10368 --- linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10369 +++ linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10370 @@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10371 /*
10372 * Debug level, exported for io_apic.c
10373 */
10374 -unsigned int apic_verbosity;
10375 +int apic_verbosity;
10376
10377 int pic_mode;
10378
10379 @@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10380 apic_write(APIC_ESR, 0);
10381 v1 = apic_read(APIC_ESR);
10382 ack_APIC_irq();
10383 - atomic_inc(&irq_err_count);
10384 + atomic_inc_unchecked(&irq_err_count);
10385
10386 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10387 smp_processor_id(), v0 , v1);
10388 @@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10389 u16 *bios_cpu_apicid;
10390 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10391
10392 + pax_track_stack();
10393 +
10394 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10395 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10396
10397 diff -urNp linux-3.0.4/arch/x86/kernel/apic/io_apic.c linux-3.0.4/arch/x86/kernel/apic/io_apic.c
10398 --- linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10399 +++ linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10400 @@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10401 }
10402 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10403
10404 -void lock_vector_lock(void)
10405 +void lock_vector_lock(void) __acquires(vector_lock)
10406 {
10407 /* Used to the online set of cpus does not change
10408 * during assign_irq_vector.
10409 @@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10410 raw_spin_lock(&vector_lock);
10411 }
10412
10413 -void unlock_vector_lock(void)
10414 +void unlock_vector_lock(void) __releases(vector_lock)
10415 {
10416 raw_spin_unlock(&vector_lock);
10417 }
10418 @@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10419 ack_APIC_irq();
10420 }
10421
10422 -atomic_t irq_mis_count;
10423 +atomic_unchecked_t irq_mis_count;
10424
10425 /*
10426 * IO-APIC versions below 0x20 don't support EOI register.
10427 @@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10428 * at the cpu.
10429 */
10430 if (!(v & (1 << (i & 0x1f)))) {
10431 - atomic_inc(&irq_mis_count);
10432 + atomic_inc_unchecked(&irq_mis_count);
10433
10434 eoi_ioapic_irq(irq, cfg);
10435 }
10436 diff -urNp linux-3.0.4/arch/x86/kernel/apm_32.c linux-3.0.4/arch/x86/kernel/apm_32.c
10437 --- linux-3.0.4/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10438 +++ linux-3.0.4/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10439 @@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10440 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10441 * even though they are called in protected mode.
10442 */
10443 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10444 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10445 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10446
10447 static const char driver_version[] = "1.16ac"; /* no spaces */
10448 @@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10449 BUG_ON(cpu != 0);
10450 gdt = get_cpu_gdt_table(cpu);
10451 save_desc_40 = gdt[0x40 / 8];
10452 +
10453 + pax_open_kernel();
10454 gdt[0x40 / 8] = bad_bios_desc;
10455 + pax_close_kernel();
10456
10457 apm_irq_save(flags);
10458 APM_DO_SAVE_SEGS;
10459 @@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10460 &call->esi);
10461 APM_DO_RESTORE_SEGS;
10462 apm_irq_restore(flags);
10463 +
10464 + pax_open_kernel();
10465 gdt[0x40 / 8] = save_desc_40;
10466 + pax_close_kernel();
10467 +
10468 put_cpu();
10469
10470 return call->eax & 0xff;
10471 @@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10472 BUG_ON(cpu != 0);
10473 gdt = get_cpu_gdt_table(cpu);
10474 save_desc_40 = gdt[0x40 / 8];
10475 +
10476 + pax_open_kernel();
10477 gdt[0x40 / 8] = bad_bios_desc;
10478 + pax_close_kernel();
10479
10480 apm_irq_save(flags);
10481 APM_DO_SAVE_SEGS;
10482 @@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10483 &call->eax);
10484 APM_DO_RESTORE_SEGS;
10485 apm_irq_restore(flags);
10486 +
10487 + pax_open_kernel();
10488 gdt[0x40 / 8] = save_desc_40;
10489 + pax_close_kernel();
10490 +
10491 put_cpu();
10492 return error;
10493 }
10494 @@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10495 * code to that CPU.
10496 */
10497 gdt = get_cpu_gdt_table(0);
10498 +
10499 + pax_open_kernel();
10500 set_desc_base(&gdt[APM_CS >> 3],
10501 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10502 set_desc_base(&gdt[APM_CS_16 >> 3],
10503 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10504 set_desc_base(&gdt[APM_DS >> 3],
10505 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10506 + pax_close_kernel();
10507
10508 proc_create("apm", 0, NULL, &apm_file_ops);
10509
10510 diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets_64.c linux-3.0.4/arch/x86/kernel/asm-offsets_64.c
10511 --- linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10512 +++ linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10513 @@ -69,6 +69,7 @@ int main(void)
10514 BLANK();
10515 #undef ENTRY
10516
10517 + DEFINE(TSS_size, sizeof(struct tss_struct));
10518 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10519 BLANK();
10520
10521 diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets.c linux-3.0.4/arch/x86/kernel/asm-offsets.c
10522 --- linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10523 +++ linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10524 @@ -33,6 +33,8 @@ void common(void) {
10525 OFFSET(TI_status, thread_info, status);
10526 OFFSET(TI_addr_limit, thread_info, addr_limit);
10527 OFFSET(TI_preempt_count, thread_info, preempt_count);
10528 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10529 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10530
10531 BLANK();
10532 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10533 @@ -53,8 +55,26 @@ void common(void) {
10534 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10535 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10536 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10537 +
10538 +#ifdef CONFIG_PAX_KERNEXEC
10539 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10540 +#endif
10541 +
10542 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10543 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10544 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10545 +#ifdef CONFIG_X86_64
10546 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10547 +#endif
10548 #endif
10549
10550 +#endif
10551 +
10552 + BLANK();
10553 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10554 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10555 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10556 +
10557 #ifdef CONFIG_XEN
10558 BLANK();
10559 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10560 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/amd.c linux-3.0.4/arch/x86/kernel/cpu/amd.c
10561 --- linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10562 +++ linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10563 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10564 unsigned int size)
10565 {
10566 /* AMD errata T13 (order #21922) */
10567 - if ((c->x86 == 6)) {
10568 + if (c->x86 == 6) {
10569 /* Duron Rev A0 */
10570 if (c->x86_model == 3 && c->x86_mask == 0)
10571 size = 64;
10572 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/common.c linux-3.0.4/arch/x86/kernel/cpu/common.c
10573 --- linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10574 +++ linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10575 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10576
10577 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10578
10579 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10580 -#ifdef CONFIG_X86_64
10581 - /*
10582 - * We need valid kernel segments for data and code in long mode too
10583 - * IRET will check the segment types kkeil 2000/10/28
10584 - * Also sysret mandates a special GDT layout
10585 - *
10586 - * TLS descriptors are currently at a different place compared to i386.
10587 - * Hopefully nobody expects them at a fixed place (Wine?)
10588 - */
10589 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10590 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10591 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10592 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10593 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10594 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10595 -#else
10596 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10597 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10598 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10599 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10600 - /*
10601 - * Segments used for calling PnP BIOS have byte granularity.
10602 - * They code segments and data segments have fixed 64k limits,
10603 - * the transfer segment sizes are set at run time.
10604 - */
10605 - /* 32-bit code */
10606 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10607 - /* 16-bit code */
10608 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10609 - /* 16-bit data */
10610 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10611 - /* 16-bit data */
10612 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10613 - /* 16-bit data */
10614 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10615 - /*
10616 - * The APM segments have byte granularity and their bases
10617 - * are set at run time. All have 64k limits.
10618 - */
10619 - /* 32-bit code */
10620 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10621 - /* 16-bit code */
10622 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10623 - /* data */
10624 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10625 -
10626 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10627 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10628 - GDT_STACK_CANARY_INIT
10629 -#endif
10630 -} };
10631 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10632 -
10633 static int __init x86_xsave_setup(char *s)
10634 {
10635 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10636 @@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10637 {
10638 struct desc_ptr gdt_descr;
10639
10640 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10641 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10642 gdt_descr.size = GDT_SIZE - 1;
10643 load_gdt(&gdt_descr);
10644 /* Reload the per-cpu base */
10645 @@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10646 /* Filter out anything that depends on CPUID levels we don't have */
10647 filter_cpuid_features(c, true);
10648
10649 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10650 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10651 +#endif
10652 +
10653 /* If the model name is still unset, do table lookup. */
10654 if (!c->x86_model_id[0]) {
10655 const char *p;
10656 @@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10657 }
10658 __setup("clearcpuid=", setup_disablecpuid);
10659
10660 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10661 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10662 +
10663 #ifdef CONFIG_X86_64
10664 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10665
10666 @@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10667 EXPORT_PER_CPU_SYMBOL(current_task);
10668
10669 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10670 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10671 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10672 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10673
10674 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10675 @@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10676 {
10677 memset(regs, 0, sizeof(struct pt_regs));
10678 regs->fs = __KERNEL_PERCPU;
10679 - regs->gs = __KERNEL_STACK_CANARY;
10680 + savesegment(gs, regs->gs);
10681
10682 return regs;
10683 }
10684 @@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10685 int i;
10686
10687 cpu = stack_smp_processor_id();
10688 - t = &per_cpu(init_tss, cpu);
10689 + t = init_tss + cpu;
10690 oist = &per_cpu(orig_ist, cpu);
10691
10692 #ifdef CONFIG_NUMA
10693 @@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10694 switch_to_new_gdt(cpu);
10695 loadsegment(fs, 0);
10696
10697 - load_idt((const struct desc_ptr *)&idt_descr);
10698 + load_idt(&idt_descr);
10699
10700 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10701 syscall_init();
10702 @@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10703 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10704 barrier();
10705
10706 - x86_configure_nx();
10707 if (cpu != 0)
10708 enable_x2apic();
10709
10710 @@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10711 {
10712 int cpu = smp_processor_id();
10713 struct task_struct *curr = current;
10714 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10715 + struct tss_struct *t = init_tss + cpu;
10716 struct thread_struct *thread = &curr->thread;
10717
10718 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10719 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/intel.c linux-3.0.4/arch/x86/kernel/cpu/intel.c
10720 --- linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-08-29 23:26:13.000000000 -0400
10721 +++ linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-08-29 23:30:14.000000000 -0400
10722 @@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10723 * Update the IDT descriptor and reload the IDT so that
10724 * it uses the read-only mapped virtual address.
10725 */
10726 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10727 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10728 load_idt(&idt_descr);
10729 }
10730 #endif
10731 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/Makefile linux-3.0.4/arch/x86/kernel/cpu/Makefile
10732 --- linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10733 +++ linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10734 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10735 CFLAGS_REMOVE_perf_event.o = -pg
10736 endif
10737
10738 -# Make sure load_percpu_segment has no stackprotector
10739 -nostackp := $(call cc-option, -fno-stack-protector)
10740 -CFLAGS_common.o := $(nostackp)
10741 -
10742 obj-y := intel_cacheinfo.o scattered.o topology.o
10743 obj-y += proc.o capflags.o powerflags.o common.o
10744 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10745 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c
10746 --- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10747 +++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10748 @@ -46,6 +46,7 @@
10749 #include <asm/ipi.h>
10750 #include <asm/mce.h>
10751 #include <asm/msr.h>
10752 +#include <asm/local.h>
10753
10754 #include "mce-internal.h"
10755
10756 @@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10757 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10758 m->cs, m->ip);
10759
10760 - if (m->cs == __KERNEL_CS)
10761 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10762 print_symbol("{%s}", m->ip);
10763 pr_cont("\n");
10764 }
10765 @@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10766
10767 #define PANIC_TIMEOUT 5 /* 5 seconds */
10768
10769 -static atomic_t mce_paniced;
10770 +static atomic_unchecked_t mce_paniced;
10771
10772 static int fake_panic;
10773 -static atomic_t mce_fake_paniced;
10774 +static atomic_unchecked_t mce_fake_paniced;
10775
10776 /* Panic in progress. Enable interrupts and wait for final IPI */
10777 static void wait_for_panic(void)
10778 @@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
10779 /*
10780 * Make sure only one CPU runs in machine check panic
10781 */
10782 - if (atomic_inc_return(&mce_paniced) > 1)
10783 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10784 wait_for_panic();
10785 barrier();
10786
10787 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10788 console_verbose();
10789 } else {
10790 /* Don't log too much for fake panic */
10791 - if (atomic_inc_return(&mce_fake_paniced) > 1)
10792 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10793 return;
10794 }
10795 /* First print corrected ones that are still unlogged */
10796 @@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
10797 * might have been modified by someone else.
10798 */
10799 rmb();
10800 - if (atomic_read(&mce_paniced))
10801 + if (atomic_read_unchecked(&mce_paniced))
10802 wait_for_panic();
10803 if (!monarch_timeout)
10804 goto out;
10805 @@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10806 */
10807
10808 static DEFINE_SPINLOCK(mce_state_lock);
10809 -static int open_count; /* #times opened */
10810 +static local_t open_count; /* #times opened */
10811 static int open_exclu; /* already open exclusive? */
10812
10813 static int mce_open(struct inode *inode, struct file *file)
10814 {
10815 spin_lock(&mce_state_lock);
10816
10817 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10818 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10819 spin_unlock(&mce_state_lock);
10820
10821 return -EBUSY;
10822 @@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
10823
10824 if (file->f_flags & O_EXCL)
10825 open_exclu = 1;
10826 - open_count++;
10827 + local_inc(&open_count);
10828
10829 spin_unlock(&mce_state_lock);
10830
10831 @@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
10832 {
10833 spin_lock(&mce_state_lock);
10834
10835 - open_count--;
10836 + local_dec(&open_count);
10837 open_exclu = 0;
10838
10839 spin_unlock(&mce_state_lock);
10840 @@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
10841 static void mce_reset(void)
10842 {
10843 cpu_missing = 0;
10844 - atomic_set(&mce_fake_paniced, 0);
10845 + atomic_set_unchecked(&mce_fake_paniced, 0);
10846 atomic_set(&mce_executing, 0);
10847 atomic_set(&mce_callin, 0);
10848 atomic_set(&global_nwo, 0);
10849 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10850 --- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
10851 +++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
10852 @@ -215,7 +215,9 @@ static int inject_init(void)
10853 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10854 return -ENOMEM;
10855 printk(KERN_INFO "Machine check injector initialized\n");
10856 - mce_chrdev_ops.write = mce_write;
10857 + pax_open_kernel();
10858 + *(void **)&mce_chrdev_ops.write = mce_write;
10859 + pax_close_kernel();
10860 register_die_notifier(&mce_raise_nb);
10861 return 0;
10862 }
10863 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c
10864 --- linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:13.000000000 -0400
10865 +++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
10866 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10867 u64 size_or_mask, size_and_mask;
10868 static bool mtrr_aps_delayed_init;
10869
10870 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10871 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10872
10873 const struct mtrr_ops *mtrr_if;
10874
10875 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10876 --- linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
10877 +++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
10878 @@ -25,7 +25,7 @@ struct mtrr_ops {
10879 int (*validate_add_page)(unsigned long base, unsigned long size,
10880 unsigned int type);
10881 int (*have_wrcomb)(void);
10882 -};
10883 +} __do_const;
10884
10885 extern int generic_get_free_region(unsigned long base, unsigned long size,
10886 int replace_reg);
10887 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/perf_event.c linux-3.0.4/arch/x86/kernel/cpu/perf_event.c
10888 --- linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-07-21 22:17:23.000000000 -0400
10889 +++ linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-08-23 21:48:14.000000000 -0400
10890 @@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
10891 int i, j, w, wmax, num = 0;
10892 struct hw_perf_event *hwc;
10893
10894 + pax_track_stack();
10895 +
10896 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10897
10898 for (i = 0; i < n; i++) {
10899 @@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
10900 break;
10901
10902 perf_callchain_store(entry, frame.return_address);
10903 - fp = frame.next_frame;
10904 + fp = (__force const void __user *)frame.next_frame;
10905 }
10906 }
10907
10908 diff -urNp linux-3.0.4/arch/x86/kernel/crash.c linux-3.0.4/arch/x86/kernel/crash.c
10909 --- linux-3.0.4/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
10910 +++ linux-3.0.4/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
10911 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10912 regs = args->regs;
10913
10914 #ifdef CONFIG_X86_32
10915 - if (!user_mode_vm(regs)) {
10916 + if (!user_mode(regs)) {
10917 crash_fixup_ss_esp(&fixed_regs, regs);
10918 regs = &fixed_regs;
10919 }
10920 diff -urNp linux-3.0.4/arch/x86/kernel/doublefault_32.c linux-3.0.4/arch/x86/kernel/doublefault_32.c
10921 --- linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
10922 +++ linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
10923 @@ -11,7 +11,7 @@
10924
10925 #define DOUBLEFAULT_STACKSIZE (1024)
10926 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10927 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10928 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10929
10930 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10931
10932 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
10933 unsigned long gdt, tss;
10934
10935 store_gdt(&gdt_desc);
10936 - gdt = gdt_desc.address;
10937 + gdt = (unsigned long)gdt_desc.address;
10938
10939 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10940
10941 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10942 /* 0x2 bit is always set */
10943 .flags = X86_EFLAGS_SF | 0x2,
10944 .sp = STACK_START,
10945 - .es = __USER_DS,
10946 + .es = __KERNEL_DS,
10947 .cs = __KERNEL_CS,
10948 .ss = __KERNEL_DS,
10949 - .ds = __USER_DS,
10950 + .ds = __KERNEL_DS,
10951 .fs = __KERNEL_PERCPU,
10952
10953 .__cr3 = __pa_nodebug(swapper_pg_dir),
10954 diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_32.c linux-3.0.4/arch/x86/kernel/dumpstack_32.c
10955 --- linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
10956 +++ linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
10957 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
10958 bp = stack_frame(task, regs);
10959
10960 for (;;) {
10961 - struct thread_info *context;
10962 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
10963
10964 - context = (struct thread_info *)
10965 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
10966 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
10967 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
10968
10969 - stack = (unsigned long *)context->previous_esp;
10970 - if (!stack)
10971 + if (stack_start == task_stack_page(task))
10972 break;
10973 + stack = *(unsigned long **)stack_start;
10974 if (ops->stack(data, "IRQ") < 0)
10975 break;
10976 touch_nmi_watchdog();
10977 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
10978 * When in-kernel, we also print out the stack and code at the
10979 * time of the fault..
10980 */
10981 - if (!user_mode_vm(regs)) {
10982 + if (!user_mode(regs)) {
10983 unsigned int code_prologue = code_bytes * 43 / 64;
10984 unsigned int code_len = code_bytes;
10985 unsigned char c;
10986 u8 *ip;
10987 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
10988
10989 printk(KERN_EMERG "Stack:\n");
10990 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
10991
10992 printk(KERN_EMERG "Code: ");
10993
10994 - ip = (u8 *)regs->ip - code_prologue;
10995 + ip = (u8 *)regs->ip - code_prologue + cs_base;
10996 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
10997 /* try starting at IP */
10998 - ip = (u8 *)regs->ip;
10999 + ip = (u8 *)regs->ip + cs_base;
11000 code_len = code_len - code_prologue + 1;
11001 }
11002 for (i = 0; i < code_len; i++, ip++) {
11003 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11004 printk(" Bad EIP value.");
11005 break;
11006 }
11007 - if (ip == (u8 *)regs->ip)
11008 + if (ip == (u8 *)regs->ip + cs_base)
11009 printk("<%02x> ", c);
11010 else
11011 printk("%02x ", c);
11012 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11013 {
11014 unsigned short ud2;
11015
11016 + ip = ktla_ktva(ip);
11017 if (ip < PAGE_OFFSET)
11018 return 0;
11019 if (probe_kernel_address((unsigned short *)ip, ud2))
11020 diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_64.c linux-3.0.4/arch/x86/kernel/dumpstack_64.c
11021 --- linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11022 +++ linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11023 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11024 unsigned long *irq_stack_end =
11025 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11026 unsigned used = 0;
11027 - struct thread_info *tinfo;
11028 int graph = 0;
11029 unsigned long dummy;
11030 + void *stack_start;
11031
11032 if (!task)
11033 task = current;
11034 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11035 * current stack address. If the stacks consist of nested
11036 * exceptions
11037 */
11038 - tinfo = task_thread_info(task);
11039 for (;;) {
11040 char *id;
11041 unsigned long *estack_end;
11042 +
11043 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11044 &used, &id);
11045
11046 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11047 if (ops->stack(data, id) < 0)
11048 break;
11049
11050 - bp = ops->walk_stack(tinfo, stack, bp, ops,
11051 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11052 data, estack_end, &graph);
11053 ops->stack(data, "<EOE>");
11054 /*
11055 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11056 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11057 if (ops->stack(data, "IRQ") < 0)
11058 break;
11059 - bp = ops->walk_stack(tinfo, stack, bp,
11060 + bp = ops->walk_stack(task, irq_stack, stack, bp,
11061 ops, data, irq_stack_end, &graph);
11062 /*
11063 * We link to the next stack (which would be
11064 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11065 /*
11066 * This handles the process stack:
11067 */
11068 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11069 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11070 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11071 put_cpu();
11072 }
11073 EXPORT_SYMBOL(dump_trace);
11074 diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack.c linux-3.0.4/arch/x86/kernel/dumpstack.c
11075 --- linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11076 +++ linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11077 @@ -2,6 +2,9 @@
11078 * Copyright (C) 1991, 1992 Linus Torvalds
11079 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11080 */
11081 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11082 +#define __INCLUDED_BY_HIDESYM 1
11083 +#endif
11084 #include <linux/kallsyms.h>
11085 #include <linux/kprobes.h>
11086 #include <linux/uaccess.h>
11087 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11088 static void
11089 print_ftrace_graph_addr(unsigned long addr, void *data,
11090 const struct stacktrace_ops *ops,
11091 - struct thread_info *tinfo, int *graph)
11092 + struct task_struct *task, int *graph)
11093 {
11094 - struct task_struct *task = tinfo->task;
11095 unsigned long ret_addr;
11096 int index = task->curr_ret_stack;
11097
11098 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11099 static inline void
11100 print_ftrace_graph_addr(unsigned long addr, void *data,
11101 const struct stacktrace_ops *ops,
11102 - struct thread_info *tinfo, int *graph)
11103 + struct task_struct *task, int *graph)
11104 { }
11105 #endif
11106
11107 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11108 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11109 */
11110
11111 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11112 - void *p, unsigned int size, void *end)
11113 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11114 {
11115 - void *t = tinfo;
11116 if (end) {
11117 if (p < end && p >= (end-THREAD_SIZE))
11118 return 1;
11119 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11120 }
11121
11122 unsigned long
11123 -print_context_stack(struct thread_info *tinfo,
11124 +print_context_stack(struct task_struct *task, void *stack_start,
11125 unsigned long *stack, unsigned long bp,
11126 const struct stacktrace_ops *ops, void *data,
11127 unsigned long *end, int *graph)
11128 {
11129 struct stack_frame *frame = (struct stack_frame *)bp;
11130
11131 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11132 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11133 unsigned long addr;
11134
11135 addr = *stack;
11136 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11137 } else {
11138 ops->address(data, addr, 0);
11139 }
11140 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11141 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11142 }
11143 stack++;
11144 }
11145 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11146 EXPORT_SYMBOL_GPL(print_context_stack);
11147
11148 unsigned long
11149 -print_context_stack_bp(struct thread_info *tinfo,
11150 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11151 unsigned long *stack, unsigned long bp,
11152 const struct stacktrace_ops *ops, void *data,
11153 unsigned long *end, int *graph)
11154 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11155 struct stack_frame *frame = (struct stack_frame *)bp;
11156 unsigned long *ret_addr = &frame->return_address;
11157
11158 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11159 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11160 unsigned long addr = *ret_addr;
11161
11162 if (!__kernel_text_address(addr))
11163 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11164 ops->address(data, addr, 1);
11165 frame = frame->next_frame;
11166 ret_addr = &frame->return_address;
11167 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11168 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11169 }
11170
11171 return (unsigned long)frame;
11172 @@ -186,7 +186,7 @@ void dump_stack(void)
11173
11174 bp = stack_frame(current, NULL);
11175 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11176 - current->pid, current->comm, print_tainted(),
11177 + task_pid_nr(current), current->comm, print_tainted(),
11178 init_utsname()->release,
11179 (int)strcspn(init_utsname()->version, " "),
11180 init_utsname()->version);
11181 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11182 }
11183 EXPORT_SYMBOL_GPL(oops_begin);
11184
11185 +extern void gr_handle_kernel_exploit(void);
11186 +
11187 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11188 {
11189 if (regs && kexec_should_crash(current))
11190 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11191 panic("Fatal exception in interrupt");
11192 if (panic_on_oops)
11193 panic("Fatal exception");
11194 - do_exit(signr);
11195 +
11196 + gr_handle_kernel_exploit();
11197 +
11198 + do_group_exit(signr);
11199 }
11200
11201 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11202 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11203
11204 show_registers(regs);
11205 #ifdef CONFIG_X86_32
11206 - if (user_mode_vm(regs)) {
11207 + if (user_mode(regs)) {
11208 sp = regs->sp;
11209 ss = regs->ss & 0xffff;
11210 } else {
11211 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11212 unsigned long flags = oops_begin();
11213 int sig = SIGSEGV;
11214
11215 - if (!user_mode_vm(regs))
11216 + if (!user_mode(regs))
11217 report_bug(regs->ip, regs);
11218
11219 if (__die(str, regs, err))
11220 diff -urNp linux-3.0.4/arch/x86/kernel/early_printk.c linux-3.0.4/arch/x86/kernel/early_printk.c
11221 --- linux-3.0.4/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11222 +++ linux-3.0.4/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11223 @@ -7,6 +7,7 @@
11224 #include <linux/pci_regs.h>
11225 #include <linux/pci_ids.h>
11226 #include <linux/errno.h>
11227 +#include <linux/sched.h>
11228 #include <asm/io.h>
11229 #include <asm/processor.h>
11230 #include <asm/fcntl.h>
11231 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11232 int n;
11233 va_list ap;
11234
11235 + pax_track_stack();
11236 +
11237 va_start(ap, fmt);
11238 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11239 early_console->write(early_console, buf, n);
11240 diff -urNp linux-3.0.4/arch/x86/kernel/entry_32.S linux-3.0.4/arch/x86/kernel/entry_32.S
11241 --- linux-3.0.4/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11242 +++ linux-3.0.4/arch/x86/kernel/entry_32.S 2011-08-23 21:48:14.000000000 -0400
11243 @@ -185,13 +185,146 @@
11244 /*CFI_REL_OFFSET gs, PT_GS*/
11245 .endm
11246 .macro SET_KERNEL_GS reg
11247 +
11248 +#ifdef CONFIG_CC_STACKPROTECTOR
11249 movl $(__KERNEL_STACK_CANARY), \reg
11250 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11251 + movl $(__USER_DS), \reg
11252 +#else
11253 + xorl \reg, \reg
11254 +#endif
11255 +
11256 movl \reg, %gs
11257 .endm
11258
11259 #endif /* CONFIG_X86_32_LAZY_GS */
11260
11261 -.macro SAVE_ALL
11262 +.macro pax_enter_kernel
11263 +#ifdef CONFIG_PAX_KERNEXEC
11264 + call pax_enter_kernel
11265 +#endif
11266 +.endm
11267 +
11268 +.macro pax_exit_kernel
11269 +#ifdef CONFIG_PAX_KERNEXEC
11270 + call pax_exit_kernel
11271 +#endif
11272 +.endm
11273 +
11274 +#ifdef CONFIG_PAX_KERNEXEC
11275 +ENTRY(pax_enter_kernel)
11276 +#ifdef CONFIG_PARAVIRT
11277 + pushl %eax
11278 + pushl %ecx
11279 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11280 + mov %eax, %esi
11281 +#else
11282 + mov %cr0, %esi
11283 +#endif
11284 + bts $16, %esi
11285 + jnc 1f
11286 + mov %cs, %esi
11287 + cmp $__KERNEL_CS, %esi
11288 + jz 3f
11289 + ljmp $__KERNEL_CS, $3f
11290 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11291 +2:
11292 +#ifdef CONFIG_PARAVIRT
11293 + mov %esi, %eax
11294 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11295 +#else
11296 + mov %esi, %cr0
11297 +#endif
11298 +3:
11299 +#ifdef CONFIG_PARAVIRT
11300 + popl %ecx
11301 + popl %eax
11302 +#endif
11303 + ret
11304 +ENDPROC(pax_enter_kernel)
11305 +
11306 +ENTRY(pax_exit_kernel)
11307 +#ifdef CONFIG_PARAVIRT
11308 + pushl %eax
11309 + pushl %ecx
11310 +#endif
11311 + mov %cs, %esi
11312 + cmp $__KERNEXEC_KERNEL_CS, %esi
11313 + jnz 2f
11314 +#ifdef CONFIG_PARAVIRT
11315 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11316 + mov %eax, %esi
11317 +#else
11318 + mov %cr0, %esi
11319 +#endif
11320 + btr $16, %esi
11321 + ljmp $__KERNEL_CS, $1f
11322 +1:
11323 +#ifdef CONFIG_PARAVIRT
11324 + mov %esi, %eax
11325 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11326 +#else
11327 + mov %esi, %cr0
11328 +#endif
11329 +2:
11330 +#ifdef CONFIG_PARAVIRT
11331 + popl %ecx
11332 + popl %eax
11333 +#endif
11334 + ret
11335 +ENDPROC(pax_exit_kernel)
11336 +#endif
11337 +
11338 +.macro pax_erase_kstack
11339 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11340 + call pax_erase_kstack
11341 +#endif
11342 +.endm
11343 +
11344 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11345 +/*
11346 + * ebp: thread_info
11347 + * ecx, edx: can be clobbered
11348 + */
11349 +ENTRY(pax_erase_kstack)
11350 + pushl %edi
11351 + pushl %eax
11352 +
11353 + mov TI_lowest_stack(%ebp), %edi
11354 + mov $-0xBEEF, %eax
11355 + std
11356 +
11357 +1: mov %edi, %ecx
11358 + and $THREAD_SIZE_asm - 1, %ecx
11359 + shr $2, %ecx
11360 + repne scasl
11361 + jecxz 2f
11362 +
11363 + cmp $2*16, %ecx
11364 + jc 2f
11365 +
11366 + mov $2*16, %ecx
11367 + repe scasl
11368 + jecxz 2f
11369 + jne 1b
11370 +
11371 +2: cld
11372 + mov %esp, %ecx
11373 + sub %edi, %ecx
11374 + shr $2, %ecx
11375 + rep stosl
11376 +
11377 + mov TI_task_thread_sp0(%ebp), %edi
11378 + sub $128, %edi
11379 + mov %edi, TI_lowest_stack(%ebp)
11380 +
11381 + popl %eax
11382 + popl %edi
11383 + ret
11384 +ENDPROC(pax_erase_kstack)
11385 +#endif
11386 +
11387 +.macro __SAVE_ALL _DS
11388 cld
11389 PUSH_GS
11390 pushl_cfi %fs
11391 @@ -214,7 +347,7 @@
11392 CFI_REL_OFFSET ecx, 0
11393 pushl_cfi %ebx
11394 CFI_REL_OFFSET ebx, 0
11395 - movl $(__USER_DS), %edx
11396 + movl $\_DS, %edx
11397 movl %edx, %ds
11398 movl %edx, %es
11399 movl $(__KERNEL_PERCPU), %edx
11400 @@ -222,6 +355,15 @@
11401 SET_KERNEL_GS %edx
11402 .endm
11403
11404 +.macro SAVE_ALL
11405 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11406 + __SAVE_ALL __KERNEL_DS
11407 + pax_enter_kernel
11408 +#else
11409 + __SAVE_ALL __USER_DS
11410 +#endif
11411 +.endm
11412 +
11413 .macro RESTORE_INT_REGS
11414 popl_cfi %ebx
11415 CFI_RESTORE ebx
11416 @@ -332,7 +474,15 @@ check_userspace:
11417 movb PT_CS(%esp), %al
11418 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11419 cmpl $USER_RPL, %eax
11420 +
11421 +#ifdef CONFIG_PAX_KERNEXEC
11422 + jae resume_userspace
11423 +
11424 + PAX_EXIT_KERNEL
11425 + jmp resume_kernel
11426 +#else
11427 jb resume_kernel # not returning to v8086 or userspace
11428 +#endif
11429
11430 ENTRY(resume_userspace)
11431 LOCKDEP_SYS_EXIT
11432 @@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11433 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11434 # int/exception return?
11435 jne work_pending
11436 - jmp restore_all
11437 + jmp restore_all_pax
11438 END(ret_from_exception)
11439
11440 #ifdef CONFIG_PREEMPT
11441 @@ -394,23 +544,34 @@ sysenter_past_esp:
11442 /*CFI_REL_OFFSET cs, 0*/
11443 /*
11444 * Push current_thread_info()->sysenter_return to the stack.
11445 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11446 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11447 */
11448 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11449 + pushl_cfi $0
11450 CFI_REL_OFFSET eip, 0
11451
11452 pushl_cfi %eax
11453 SAVE_ALL
11454 + GET_THREAD_INFO(%ebp)
11455 + movl TI_sysenter_return(%ebp),%ebp
11456 + movl %ebp,PT_EIP(%esp)
11457 ENABLE_INTERRUPTS(CLBR_NONE)
11458
11459 /*
11460 * Load the potential sixth argument from user stack.
11461 * Careful about security.
11462 */
11463 + movl PT_OLDESP(%esp),%ebp
11464 +
11465 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11466 + mov PT_OLDSS(%esp),%ds
11467 +1: movl %ds:(%ebp),%ebp
11468 + push %ss
11469 + pop %ds
11470 +#else
11471 cmpl $__PAGE_OFFSET-3,%ebp
11472 jae syscall_fault
11473 1: movl (%ebp),%ebp
11474 +#endif
11475 +
11476 movl %ebp,PT_EBP(%esp)
11477 .section __ex_table,"a"
11478 .align 4
11479 @@ -433,12 +594,23 @@ sysenter_do_call:
11480 testl $_TIF_ALLWORK_MASK, %ecx
11481 jne sysexit_audit
11482 sysenter_exit:
11483 +
11484 +#ifdef CONFIG_PAX_RANDKSTACK
11485 + pushl_cfi %eax
11486 + call pax_randomize_kstack
11487 + popl_cfi %eax
11488 +#endif
11489 +
11490 + pax_erase_kstack
11491 +
11492 /* if something modifies registers it must also disable sysexit */
11493 movl PT_EIP(%esp), %edx
11494 movl PT_OLDESP(%esp), %ecx
11495 xorl %ebp,%ebp
11496 TRACE_IRQS_ON
11497 1: mov PT_FS(%esp), %fs
11498 +2: mov PT_DS(%esp), %ds
11499 +3: mov PT_ES(%esp), %es
11500 PTGS_TO_GS
11501 ENABLE_INTERRUPTS_SYSEXIT
11502
11503 @@ -455,6 +627,9 @@ sysenter_audit:
11504 movl %eax,%edx /* 2nd arg: syscall number */
11505 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11506 call audit_syscall_entry
11507 +
11508 + pax_erase_kstack
11509 +
11510 pushl_cfi %ebx
11511 movl PT_EAX(%esp),%eax /* reload syscall number */
11512 jmp sysenter_do_call
11513 @@ -481,11 +656,17 @@ sysexit_audit:
11514
11515 CFI_ENDPROC
11516 .pushsection .fixup,"ax"
11517 -2: movl $0,PT_FS(%esp)
11518 +4: movl $0,PT_FS(%esp)
11519 + jmp 1b
11520 +5: movl $0,PT_DS(%esp)
11521 + jmp 1b
11522 +6: movl $0,PT_ES(%esp)
11523 jmp 1b
11524 .section __ex_table,"a"
11525 .align 4
11526 - .long 1b,2b
11527 + .long 1b,4b
11528 + .long 2b,5b
11529 + .long 3b,6b
11530 .popsection
11531 PTGS_TO_GS_EX
11532 ENDPROC(ia32_sysenter_target)
11533 @@ -518,6 +699,14 @@ syscall_exit:
11534 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11535 jne syscall_exit_work
11536
11537 +restore_all_pax:
11538 +
11539 +#ifdef CONFIG_PAX_RANDKSTACK
11540 + call pax_randomize_kstack
11541 +#endif
11542 +
11543 + pax_erase_kstack
11544 +
11545 restore_all:
11546 TRACE_IRQS_IRET
11547 restore_all_notrace:
11548 @@ -577,14 +766,34 @@ ldt_ss:
11549 * compensating for the offset by changing to the ESPFIX segment with
11550 * a base address that matches for the difference.
11551 */
11552 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11553 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11554 mov %esp, %edx /* load kernel esp */
11555 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11556 mov %dx, %ax /* eax: new kernel esp */
11557 sub %eax, %edx /* offset (low word is 0) */
11558 +#ifdef CONFIG_SMP
11559 + movl PER_CPU_VAR(cpu_number), %ebx
11560 + shll $PAGE_SHIFT_asm, %ebx
11561 + addl $cpu_gdt_table, %ebx
11562 +#else
11563 + movl $cpu_gdt_table, %ebx
11564 +#endif
11565 shr $16, %edx
11566 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11567 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11568 +
11569 +#ifdef CONFIG_PAX_KERNEXEC
11570 + mov %cr0, %esi
11571 + btr $16, %esi
11572 + mov %esi, %cr0
11573 +#endif
11574 +
11575 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11576 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11577 +
11578 +#ifdef CONFIG_PAX_KERNEXEC
11579 + bts $16, %esi
11580 + mov %esi, %cr0
11581 +#endif
11582 +
11583 pushl_cfi $__ESPFIX_SS
11584 pushl_cfi %eax /* new kernel esp */
11585 /* Disable interrupts, but do not irqtrace this section: we
11586 @@ -613,29 +822,23 @@ work_resched:
11587 movl TI_flags(%ebp), %ecx
11588 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11589 # than syscall tracing?
11590 - jz restore_all
11591 + jz restore_all_pax
11592 testb $_TIF_NEED_RESCHED, %cl
11593 jnz work_resched
11594
11595 work_notifysig: # deal with pending signals and
11596 # notify-resume requests
11597 + movl %esp, %eax
11598 #ifdef CONFIG_VM86
11599 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11600 - movl %esp, %eax
11601 - jne work_notifysig_v86 # returning to kernel-space or
11602 + jz 1f # returning to kernel-space or
11603 # vm86-space
11604 - xorl %edx, %edx
11605 - call do_notify_resume
11606 - jmp resume_userspace_sig
11607
11608 - ALIGN
11609 -work_notifysig_v86:
11610 pushl_cfi %ecx # save ti_flags for do_notify_resume
11611 call save_v86_state # %eax contains pt_regs pointer
11612 popl_cfi %ecx
11613 movl %eax, %esp
11614 -#else
11615 - movl %esp, %eax
11616 +1:
11617 #endif
11618 xorl %edx, %edx
11619 call do_notify_resume
11620 @@ -648,6 +851,9 @@ syscall_trace_entry:
11621 movl $-ENOSYS,PT_EAX(%esp)
11622 movl %esp, %eax
11623 call syscall_trace_enter
11624 +
11625 + pax_erase_kstack
11626 +
11627 /* What it returned is what we'll actually use. */
11628 cmpl $(nr_syscalls), %eax
11629 jnae syscall_call
11630 @@ -670,6 +876,10 @@ END(syscall_exit_work)
11631
11632 RING0_INT_FRAME # can't unwind into user space anyway
11633 syscall_fault:
11634 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11635 + push %ss
11636 + pop %ds
11637 +#endif
11638 GET_THREAD_INFO(%ebp)
11639 movl $-EFAULT,PT_EAX(%esp)
11640 jmp resume_userspace
11641 @@ -752,6 +962,36 @@ ptregs_clone:
11642 CFI_ENDPROC
11643 ENDPROC(ptregs_clone)
11644
11645 + ALIGN;
11646 +ENTRY(kernel_execve)
11647 + CFI_STARTPROC
11648 + pushl_cfi %ebp
11649 + sub $PT_OLDSS+4,%esp
11650 + pushl_cfi %edi
11651 + pushl_cfi %ecx
11652 + pushl_cfi %eax
11653 + lea 3*4(%esp),%edi
11654 + mov $PT_OLDSS/4+1,%ecx
11655 + xorl %eax,%eax
11656 + rep stosl
11657 + popl_cfi %eax
11658 + popl_cfi %ecx
11659 + popl_cfi %edi
11660 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11661 + pushl_cfi %esp
11662 + call sys_execve
11663 + add $4,%esp
11664 + CFI_ADJUST_CFA_OFFSET -4
11665 + GET_THREAD_INFO(%ebp)
11666 + test %eax,%eax
11667 + jz syscall_exit
11668 + add $PT_OLDSS+4,%esp
11669 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11670 + popl_cfi %ebp
11671 + ret
11672 + CFI_ENDPROC
11673 +ENDPROC(kernel_execve)
11674 +
11675 .macro FIXUP_ESPFIX_STACK
11676 /*
11677 * Switch back for ESPFIX stack to the normal zerobased stack
11678 @@ -761,8 +1001,15 @@ ENDPROC(ptregs_clone)
11679 * normal stack and adjusts ESP with the matching offset.
11680 */
11681 /* fixup the stack */
11682 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11683 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11684 +#ifdef CONFIG_SMP
11685 + movl PER_CPU_VAR(cpu_number), %ebx
11686 + shll $PAGE_SHIFT_asm, %ebx
11687 + addl $cpu_gdt_table, %ebx
11688 +#else
11689 + movl $cpu_gdt_table, %ebx
11690 +#endif
11691 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11692 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11693 shl $16, %eax
11694 addl %esp, %eax /* the adjusted stack pointer */
11695 pushl_cfi $__KERNEL_DS
11696 @@ -1213,7 +1460,6 @@ return_to_handler:
11697 jmp *%ecx
11698 #endif
11699
11700 -.section .rodata,"a"
11701 #include "syscall_table_32.S"
11702
11703 syscall_table_size=(.-sys_call_table)
11704 @@ -1259,9 +1505,12 @@ error_code:
11705 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11706 REG_TO_PTGS %ecx
11707 SET_KERNEL_GS %ecx
11708 - movl $(__USER_DS), %ecx
11709 + movl $(__KERNEL_DS), %ecx
11710 movl %ecx, %ds
11711 movl %ecx, %es
11712 +
11713 + pax_enter_kernel
11714 +
11715 TRACE_IRQS_OFF
11716 movl %esp,%eax # pt_regs pointer
11717 call *%edi
11718 @@ -1346,6 +1595,9 @@ nmi_stack_correct:
11719 xorl %edx,%edx # zero error code
11720 movl %esp,%eax # pt_regs pointer
11721 call do_nmi
11722 +
11723 + pax_exit_kernel
11724 +
11725 jmp restore_all_notrace
11726 CFI_ENDPROC
11727
11728 @@ -1382,6 +1634,9 @@ nmi_espfix_stack:
11729 FIXUP_ESPFIX_STACK # %eax == %esp
11730 xorl %edx,%edx # zero error code
11731 call do_nmi
11732 +
11733 + pax_exit_kernel
11734 +
11735 RESTORE_REGS
11736 lss 12+4(%esp), %esp # back to espfix stack
11737 CFI_ADJUST_CFA_OFFSET -24
11738 diff -urNp linux-3.0.4/arch/x86/kernel/entry_64.S linux-3.0.4/arch/x86/kernel/entry_64.S
11739 --- linux-3.0.4/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11740 +++ linux-3.0.4/arch/x86/kernel/entry_64.S 2011-08-26 19:49:56.000000000 -0400
11741 @@ -53,6 +53,7 @@
11742 #include <asm/paravirt.h>
11743 #include <asm/ftrace.h>
11744 #include <asm/percpu.h>
11745 +#include <asm/pgtable.h>
11746
11747 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11748 #include <linux/elf-em.h>
11749 @@ -176,6 +177,264 @@ ENTRY(native_usergs_sysret64)
11750 ENDPROC(native_usergs_sysret64)
11751 #endif /* CONFIG_PARAVIRT */
11752
11753 + .macro ljmpq sel, off
11754 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11755 + .byte 0x48; ljmp *1234f(%rip)
11756 + .pushsection .rodata
11757 + .align 16
11758 + 1234: .quad \off; .word \sel
11759 + .popsection
11760 +#else
11761 + pushq $\sel
11762 + pushq $\off
11763 + lretq
11764 +#endif
11765 + .endm
11766 +
11767 + .macro pax_enter_kernel
11768 +#ifdef CONFIG_PAX_KERNEXEC
11769 + call pax_enter_kernel
11770 +#endif
11771 + .endm
11772 +
11773 + .macro pax_exit_kernel
11774 +#ifdef CONFIG_PAX_KERNEXEC
11775 + call pax_exit_kernel
11776 +#endif
11777 + .endm
11778 +
11779 +#ifdef CONFIG_PAX_KERNEXEC
11780 +ENTRY(pax_enter_kernel)
11781 + pushq %rdi
11782 +
11783 +#ifdef CONFIG_PARAVIRT
11784 + PV_SAVE_REGS(CLBR_RDI)
11785 +#endif
11786 +
11787 + GET_CR0_INTO_RDI
11788 + bts $16,%rdi
11789 + jnc 1f
11790 + mov %cs,%edi
11791 + cmp $__KERNEL_CS,%edi
11792 + jz 3f
11793 + ljmpq __KERNEL_CS,3f
11794 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
11795 +2: SET_RDI_INTO_CR0
11796 +3:
11797 +
11798 +#ifdef CONFIG_PARAVIRT
11799 + PV_RESTORE_REGS(CLBR_RDI)
11800 +#endif
11801 +
11802 + popq %rdi
11803 + retq
11804 +ENDPROC(pax_enter_kernel)
11805 +
11806 +ENTRY(pax_exit_kernel)
11807 + pushq %rdi
11808 +
11809 +#ifdef CONFIG_PARAVIRT
11810 + PV_SAVE_REGS(CLBR_RDI)
11811 +#endif
11812 +
11813 + mov %cs,%rdi
11814 + cmp $__KERNEXEC_KERNEL_CS,%edi
11815 + jnz 2f
11816 + GET_CR0_INTO_RDI
11817 + btr $16,%rdi
11818 + ljmpq __KERNEL_CS,1f
11819 +1: SET_RDI_INTO_CR0
11820 +2:
11821 +
11822 +#ifdef CONFIG_PARAVIRT
11823 + PV_RESTORE_REGS(CLBR_RDI);
11824 +#endif
11825 +
11826 + popq %rdi
11827 + retq
11828 +ENDPROC(pax_exit_kernel)
11829 +#endif
11830 +
11831 + .macro pax_enter_kernel_user
11832 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11833 + call pax_enter_kernel_user
11834 +#endif
11835 + .endm
11836 +
11837 + .macro pax_exit_kernel_user
11838 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11839 + call pax_exit_kernel_user
11840 +#endif
11841 +#ifdef CONFIG_PAX_RANDKSTACK
11842 + push %rax
11843 + call pax_randomize_kstack
11844 + pop %rax
11845 +#endif
11846 + .endm
11847 +
11848 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11849 +ENTRY(pax_enter_kernel_user)
11850 + pushq %rdi
11851 + pushq %rbx
11852 +
11853 +#ifdef CONFIG_PARAVIRT
11854 + PV_SAVE_REGS(CLBR_RDI)
11855 +#endif
11856 +
11857 + GET_CR3_INTO_RDI
11858 + mov %rdi,%rbx
11859 + add $__START_KERNEL_map,%rbx
11860 + sub phys_base(%rip),%rbx
11861 +
11862 +#ifdef CONFIG_PARAVIRT
11863 + pushq %rdi
11864 + cmpl $0, pv_info+PARAVIRT_enabled
11865 + jz 1f
11866 + i = 0
11867 + .rept USER_PGD_PTRS
11868 + mov i*8(%rbx),%rsi
11869 + mov $0,%sil
11870 + lea i*8(%rbx),%rdi
11871 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11872 + i = i + 1
11873 + .endr
11874 + jmp 2f
11875 +1:
11876 +#endif
11877 +
11878 + i = 0
11879 + .rept USER_PGD_PTRS
11880 + movb $0,i*8(%rbx)
11881 + i = i + 1
11882 + .endr
11883 +
11884 +#ifdef CONFIG_PARAVIRT
11885 +2: popq %rdi
11886 +#endif
11887 + SET_RDI_INTO_CR3
11888 +
11889 +#ifdef CONFIG_PAX_KERNEXEC
11890 + GET_CR0_INTO_RDI
11891 + bts $16,%rdi
11892 + SET_RDI_INTO_CR0
11893 +#endif
11894 +
11895 +#ifdef CONFIG_PARAVIRT
11896 + PV_RESTORE_REGS(CLBR_RDI)
11897 +#endif
11898 +
11899 + popq %rbx
11900 + popq %rdi
11901 + retq
11902 +ENDPROC(pax_enter_kernel_user)
11903 +
11904 +ENTRY(pax_exit_kernel_user)
11905 + push %rdi
11906 +
11907 +#ifdef CONFIG_PARAVIRT
11908 + pushq %rbx
11909 + PV_SAVE_REGS(CLBR_RDI)
11910 +#endif
11911 +
11912 +#ifdef CONFIG_PAX_KERNEXEC
11913 + GET_CR0_INTO_RDI
11914 + btr $16,%rdi
11915 + SET_RDI_INTO_CR0
11916 +#endif
11917 +
11918 + GET_CR3_INTO_RDI
11919 + add $__START_KERNEL_map,%rdi
11920 + sub phys_base(%rip),%rdi
11921 +
11922 +#ifdef CONFIG_PARAVIRT
11923 + cmpl $0, pv_info+PARAVIRT_enabled
11924 + jz 1f
11925 + mov %rdi,%rbx
11926 + i = 0
11927 + .rept USER_PGD_PTRS
11928 + mov i*8(%rbx),%rsi
11929 + mov $0x67,%sil
11930 + lea i*8(%rbx),%rdi
11931 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11932 + i = i + 1
11933 + .endr
11934 + jmp 2f
11935 +1:
11936 +#endif
11937 +
11938 + i = 0
11939 + .rept USER_PGD_PTRS
11940 + movb $0x67,i*8(%rdi)
11941 + i = i + 1
11942 + .endr
11943 +
11944 +#ifdef CONFIG_PARAVIRT
11945 +2: PV_RESTORE_REGS(CLBR_RDI)
11946 + popq %rbx
11947 +#endif
11948 +
11949 + popq %rdi
11950 + retq
11951 +ENDPROC(pax_exit_kernel_user)
11952 +#endif
11953 +
11954 + .macro pax_erase_kstack
11955 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11956 + call pax_erase_kstack
11957 +#endif
11958 + .endm
11959 +
11960 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11961 +/*
11962 + * r10: thread_info
11963 + * rcx, rdx: can be clobbered
11964 + */
11965 +ENTRY(pax_erase_kstack)
11966 + pushq %rdi
11967 + pushq %rax
11968 + pushq %r10
11969 +
11970 + GET_THREAD_INFO(%r10)
11971 + mov TI_lowest_stack(%r10), %rdi
11972 + mov $-0xBEEF, %rax
11973 + std
11974 +
11975 +1: mov %edi, %ecx
11976 + and $THREAD_SIZE_asm - 1, %ecx
11977 + shr $3, %ecx
11978 + repne scasq
11979 + jecxz 2f
11980 +
11981 + cmp $2*8, %ecx
11982 + jc 2f
11983 +
11984 + mov $2*8, %ecx
11985 + repe scasq
11986 + jecxz 2f
11987 + jne 1b
11988 +
11989 +2: cld
11990 + mov %esp, %ecx
11991 + sub %edi, %ecx
11992 +
11993 + cmp $THREAD_SIZE_asm, %rcx
11994 + jb 3f
11995 + ud2
11996 +3:
11997 +
11998 + shr $3, %ecx
11999 + rep stosq
12000 +
12001 + mov TI_task_thread_sp0(%r10), %rdi
12002 + sub $256, %rdi
12003 + mov %rdi, TI_lowest_stack(%r10)
12004 +
12005 + popq %r10
12006 + popq %rax
12007 + popq %rdi
12008 + ret
12009 +ENDPROC(pax_erase_kstack)
12010 +#endif
12011
12012 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12013 #ifdef CONFIG_TRACE_IRQFLAGS
12014 @@ -318,7 +577,7 @@ ENTRY(save_args)
12015 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12016 movq_cfi rbp, 8 /* push %rbp */
12017 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12018 - testl $3, CS(%rdi)
12019 + testb $3, CS(%rdi)
12020 je 1f
12021 SWAPGS
12022 /*
12023 @@ -409,7 +668,7 @@ ENTRY(ret_from_fork)
12024
12025 RESTORE_REST
12026
12027 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12028 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12029 je int_ret_from_sys_call
12030
12031 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12032 @@ -455,7 +714,7 @@ END(ret_from_fork)
12033 ENTRY(system_call)
12034 CFI_STARTPROC simple
12035 CFI_SIGNAL_FRAME
12036 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12037 + CFI_DEF_CFA rsp,0
12038 CFI_REGISTER rip,rcx
12039 /*CFI_REGISTER rflags,r11*/
12040 SWAPGS_UNSAFE_STACK
12041 @@ -468,12 +727,13 @@ ENTRY(system_call_after_swapgs)
12042
12043 movq %rsp,PER_CPU_VAR(old_rsp)
12044 movq PER_CPU_VAR(kernel_stack),%rsp
12045 + pax_enter_kernel_user
12046 /*
12047 * No need to follow this irqs off/on section - it's straight
12048 * and short:
12049 */
12050 ENABLE_INTERRUPTS(CLBR_NONE)
12051 - SAVE_ARGS 8,1
12052 + SAVE_ARGS 8*6,1
12053 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12054 movq %rcx,RIP-ARGOFFSET(%rsp)
12055 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12056 @@ -502,6 +762,8 @@ sysret_check:
12057 andl %edi,%edx
12058 jnz sysret_careful
12059 CFI_REMEMBER_STATE
12060 + pax_exit_kernel_user
12061 + pax_erase_kstack
12062 /*
12063 * sysretq will re-enable interrupts:
12064 */
12065 @@ -560,6 +822,9 @@ auditsys:
12066 movq %rax,%rsi /* 2nd arg: syscall number */
12067 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12068 call audit_syscall_entry
12069 +
12070 + pax_erase_kstack
12071 +
12072 LOAD_ARGS 0 /* reload call-clobbered registers */
12073 jmp system_call_fastpath
12074
12075 @@ -590,6 +855,9 @@ tracesys:
12076 FIXUP_TOP_OF_STACK %rdi
12077 movq %rsp,%rdi
12078 call syscall_trace_enter
12079 +
12080 + pax_erase_kstack
12081 +
12082 /*
12083 * Reload arg registers from stack in case ptrace changed them.
12084 * We don't reload %rax because syscall_trace_enter() returned
12085 @@ -611,7 +879,7 @@ tracesys:
12086 GLOBAL(int_ret_from_sys_call)
12087 DISABLE_INTERRUPTS(CLBR_NONE)
12088 TRACE_IRQS_OFF
12089 - testl $3,CS-ARGOFFSET(%rsp)
12090 + testb $3,CS-ARGOFFSET(%rsp)
12091 je retint_restore_args
12092 movl $_TIF_ALLWORK_MASK,%edi
12093 /* edi: mask to check */
12094 @@ -793,6 +1061,16 @@ END(interrupt)
12095 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12096 call save_args
12097 PARTIAL_FRAME 0
12098 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12099 + testb $3, CS(%rdi)
12100 + jnz 1f
12101 + pax_enter_kernel
12102 + jmp 2f
12103 +1: pax_enter_kernel_user
12104 +2:
12105 +#else
12106 + pax_enter_kernel
12107 +#endif
12108 call \func
12109 .endm
12110
12111 @@ -825,7 +1103,7 @@ ret_from_intr:
12112 CFI_ADJUST_CFA_OFFSET -8
12113 exit_intr:
12114 GET_THREAD_INFO(%rcx)
12115 - testl $3,CS-ARGOFFSET(%rsp)
12116 + testb $3,CS-ARGOFFSET(%rsp)
12117 je retint_kernel
12118
12119 /* Interrupt came from user space */
12120 @@ -847,12 +1125,15 @@ retint_swapgs: /* return to user-space
12121 * The iretq could re-enable interrupts:
12122 */
12123 DISABLE_INTERRUPTS(CLBR_ANY)
12124 + pax_exit_kernel_user
12125 + pax_erase_kstack
12126 TRACE_IRQS_IRETQ
12127 SWAPGS
12128 jmp restore_args
12129
12130 retint_restore_args: /* return to kernel space */
12131 DISABLE_INTERRUPTS(CLBR_ANY)
12132 + pax_exit_kernel
12133 /*
12134 * The iretq could re-enable interrupts:
12135 */
12136 @@ -1027,6 +1308,16 @@ ENTRY(\sym)
12137 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12138 call error_entry
12139 DEFAULT_FRAME 0
12140 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12141 + testb $3, CS(%rsp)
12142 + jnz 1f
12143 + pax_enter_kernel
12144 + jmp 2f
12145 +1: pax_enter_kernel_user
12146 +2:
12147 +#else
12148 + pax_enter_kernel
12149 +#endif
12150 movq %rsp,%rdi /* pt_regs pointer */
12151 xorl %esi,%esi /* no error code */
12152 call \do_sym
12153 @@ -1044,6 +1335,16 @@ ENTRY(\sym)
12154 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12155 call save_paranoid
12156 TRACE_IRQS_OFF
12157 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12158 + testb $3, CS(%rsp)
12159 + jnz 1f
12160 + pax_enter_kernel
12161 + jmp 2f
12162 +1: pax_enter_kernel_user
12163 +2:
12164 +#else
12165 + pax_enter_kernel
12166 +#endif
12167 movq %rsp,%rdi /* pt_regs pointer */
12168 xorl %esi,%esi /* no error code */
12169 call \do_sym
12170 @@ -1052,7 +1353,7 @@ ENTRY(\sym)
12171 END(\sym)
12172 .endm
12173
12174 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12175 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12176 .macro paranoidzeroentry_ist sym do_sym ist
12177 ENTRY(\sym)
12178 INTR_FRAME
12179 @@ -1062,8 +1363,24 @@ ENTRY(\sym)
12180 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12181 call save_paranoid
12182 TRACE_IRQS_OFF
12183 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12184 + testb $3, CS(%rsp)
12185 + jnz 1f
12186 + pax_enter_kernel
12187 + jmp 2f
12188 +1: pax_enter_kernel_user
12189 +2:
12190 +#else
12191 + pax_enter_kernel
12192 +#endif
12193 movq %rsp,%rdi /* pt_regs pointer */
12194 xorl %esi,%esi /* no error code */
12195 +#ifdef CONFIG_SMP
12196 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12197 + lea init_tss(%r12), %r12
12198 +#else
12199 + lea init_tss(%rip), %r12
12200 +#endif
12201 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12202 call \do_sym
12203 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12204 @@ -1080,6 +1397,16 @@ ENTRY(\sym)
12205 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12206 call error_entry
12207 DEFAULT_FRAME 0
12208 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12209 + testb $3, CS(%rsp)
12210 + jnz 1f
12211 + pax_enter_kernel
12212 + jmp 2f
12213 +1: pax_enter_kernel_user
12214 +2:
12215 +#else
12216 + pax_enter_kernel
12217 +#endif
12218 movq %rsp,%rdi /* pt_regs pointer */
12219 movq ORIG_RAX(%rsp),%rsi /* get error code */
12220 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12221 @@ -1099,6 +1426,16 @@ ENTRY(\sym)
12222 call save_paranoid
12223 DEFAULT_FRAME 0
12224 TRACE_IRQS_OFF
12225 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12226 + testb $3, CS(%rsp)
12227 + jnz 1f
12228 + pax_enter_kernel
12229 + jmp 2f
12230 +1: pax_enter_kernel_user
12231 +2:
12232 +#else
12233 + pax_enter_kernel
12234 +#endif
12235 movq %rsp,%rdi /* pt_regs pointer */
12236 movq ORIG_RAX(%rsp),%rsi /* get error code */
12237 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12238 @@ -1361,14 +1698,27 @@ ENTRY(paranoid_exit)
12239 TRACE_IRQS_OFF
12240 testl %ebx,%ebx /* swapgs needed? */
12241 jnz paranoid_restore
12242 - testl $3,CS(%rsp)
12243 + testb $3,CS(%rsp)
12244 jnz paranoid_userspace
12245 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12246 + pax_exit_kernel
12247 + TRACE_IRQS_IRETQ 0
12248 + SWAPGS_UNSAFE_STACK
12249 + RESTORE_ALL 8
12250 + jmp irq_return
12251 +#endif
12252 paranoid_swapgs:
12253 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12254 + pax_exit_kernel_user
12255 +#else
12256 + pax_exit_kernel
12257 +#endif
12258 TRACE_IRQS_IRETQ 0
12259 SWAPGS_UNSAFE_STACK
12260 RESTORE_ALL 8
12261 jmp irq_return
12262 paranoid_restore:
12263 + pax_exit_kernel
12264 TRACE_IRQS_IRETQ 0
12265 RESTORE_ALL 8
12266 jmp irq_return
12267 @@ -1426,7 +1776,7 @@ ENTRY(error_entry)
12268 movq_cfi r14, R14+8
12269 movq_cfi r15, R15+8
12270 xorl %ebx,%ebx
12271 - testl $3,CS+8(%rsp)
12272 + testb $3,CS+8(%rsp)
12273 je error_kernelspace
12274 error_swapgs:
12275 SWAPGS
12276 @@ -1490,6 +1840,16 @@ ENTRY(nmi)
12277 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12278 call save_paranoid
12279 DEFAULT_FRAME 0
12280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12281 + testb $3, CS(%rsp)
12282 + jnz 1f
12283 + pax_enter_kernel
12284 + jmp 2f
12285 +1: pax_enter_kernel_user
12286 +2:
12287 +#else
12288 + pax_enter_kernel
12289 +#endif
12290 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12291 movq %rsp,%rdi
12292 movq $-1,%rsi
12293 @@ -1500,11 +1860,25 @@ ENTRY(nmi)
12294 DISABLE_INTERRUPTS(CLBR_NONE)
12295 testl %ebx,%ebx /* swapgs needed? */
12296 jnz nmi_restore
12297 - testl $3,CS(%rsp)
12298 + testb $3,CS(%rsp)
12299 jnz nmi_userspace
12300 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12301 + pax_exit_kernel
12302 + SWAPGS_UNSAFE_STACK
12303 + RESTORE_ALL 8
12304 + jmp irq_return
12305 +#endif
12306 nmi_swapgs:
12307 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12308 + pax_exit_kernel_user
12309 +#else
12310 + pax_exit_kernel
12311 +#endif
12312 SWAPGS_UNSAFE_STACK
12313 + RESTORE_ALL 8
12314 + jmp irq_return
12315 nmi_restore:
12316 + pax_exit_kernel
12317 RESTORE_ALL 8
12318 jmp irq_return
12319 nmi_userspace:
12320 diff -urNp linux-3.0.4/arch/x86/kernel/ftrace.c linux-3.0.4/arch/x86/kernel/ftrace.c
12321 --- linux-3.0.4/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12322 +++ linux-3.0.4/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12323 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12324 static const void *mod_code_newcode; /* holds the text to write to the IP */
12325
12326 static unsigned nmi_wait_count;
12327 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
12328 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12329
12330 int ftrace_arch_read_dyn_info(char *buf, int size)
12331 {
12332 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12333
12334 r = snprintf(buf, size, "%u %u",
12335 nmi_wait_count,
12336 - atomic_read(&nmi_update_count));
12337 + atomic_read_unchecked(&nmi_update_count));
12338 return r;
12339 }
12340
12341 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12342
12343 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12344 smp_rmb();
12345 + pax_open_kernel();
12346 ftrace_mod_code();
12347 - atomic_inc(&nmi_update_count);
12348 + pax_close_kernel();
12349 + atomic_inc_unchecked(&nmi_update_count);
12350 }
12351 /* Must have previous changes seen before executions */
12352 smp_mb();
12353 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12354 {
12355 unsigned char replaced[MCOUNT_INSN_SIZE];
12356
12357 + ip = ktla_ktva(ip);
12358 +
12359 /*
12360 * Note: Due to modules and __init, code can
12361 * disappear and change, we need to protect against faulting
12362 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12363 unsigned char old[MCOUNT_INSN_SIZE], *new;
12364 int ret;
12365
12366 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12367 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12368 new = ftrace_call_replace(ip, (unsigned long)func);
12369 ret = ftrace_modify_code(ip, old, new);
12370
12371 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12372 {
12373 unsigned char code[MCOUNT_INSN_SIZE];
12374
12375 + ip = ktla_ktva(ip);
12376 +
12377 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12378 return -EFAULT;
12379
12380 diff -urNp linux-3.0.4/arch/x86/kernel/head32.c linux-3.0.4/arch/x86/kernel/head32.c
12381 --- linux-3.0.4/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12382 +++ linux-3.0.4/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12383 @@ -19,6 +19,7 @@
12384 #include <asm/io_apic.h>
12385 #include <asm/bios_ebda.h>
12386 #include <asm/tlbflush.h>
12387 +#include <asm/boot.h>
12388
12389 static void __init i386_default_early_setup(void)
12390 {
12391 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12392 {
12393 memblock_init();
12394
12395 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12396 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12397
12398 #ifdef CONFIG_BLK_DEV_INITRD
12399 /* Reserve INITRD */
12400 diff -urNp linux-3.0.4/arch/x86/kernel/head_32.S linux-3.0.4/arch/x86/kernel/head_32.S
12401 --- linux-3.0.4/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12402 +++ linux-3.0.4/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12403 @@ -25,6 +25,12 @@
12404 /* Physical address */
12405 #define pa(X) ((X) - __PAGE_OFFSET)
12406
12407 +#ifdef CONFIG_PAX_KERNEXEC
12408 +#define ta(X) (X)
12409 +#else
12410 +#define ta(X) ((X) - __PAGE_OFFSET)
12411 +#endif
12412 +
12413 /*
12414 * References to members of the new_cpu_data structure.
12415 */
12416 @@ -54,11 +60,7 @@
12417 * and small than max_low_pfn, otherwise will waste some page table entries
12418 */
12419
12420 -#if PTRS_PER_PMD > 1
12421 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12422 -#else
12423 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12424 -#endif
12425 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12426
12427 /* Number of possible pages in the lowmem region */
12428 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12429 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12430 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12431
12432 /*
12433 + * Real beginning of normal "text" segment
12434 + */
12435 +ENTRY(stext)
12436 +ENTRY(_stext)
12437 +
12438 +/*
12439 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12440 * %esi points to the real-mode code as a 32-bit pointer.
12441 * CS and DS must be 4 GB flat segments, but we don't depend on
12442 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12443 * can.
12444 */
12445 __HEAD
12446 +
12447 +#ifdef CONFIG_PAX_KERNEXEC
12448 + jmp startup_32
12449 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12450 +.fill PAGE_SIZE-5,1,0xcc
12451 +#endif
12452 +
12453 ENTRY(startup_32)
12454 movl pa(stack_start),%ecx
12455
12456 @@ -105,6 +120,57 @@ ENTRY(startup_32)
12457 2:
12458 leal -__PAGE_OFFSET(%ecx),%esp
12459
12460 +#ifdef CONFIG_SMP
12461 + movl $pa(cpu_gdt_table),%edi
12462 + movl $__per_cpu_load,%eax
12463 + movw %ax,__KERNEL_PERCPU + 2(%edi)
12464 + rorl $16,%eax
12465 + movb %al,__KERNEL_PERCPU + 4(%edi)
12466 + movb %ah,__KERNEL_PERCPU + 7(%edi)
12467 + movl $__per_cpu_end - 1,%eax
12468 + subl $__per_cpu_start,%eax
12469 + movw %ax,__KERNEL_PERCPU + 0(%edi)
12470 +#endif
12471 +
12472 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12473 + movl $NR_CPUS,%ecx
12474 + movl $pa(cpu_gdt_table),%edi
12475 +1:
12476 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12477 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12478 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12479 + addl $PAGE_SIZE_asm,%edi
12480 + loop 1b
12481 +#endif
12482 +
12483 +#ifdef CONFIG_PAX_KERNEXEC
12484 + movl $pa(boot_gdt),%edi
12485 + movl $__LOAD_PHYSICAL_ADDR,%eax
12486 + movw %ax,__BOOT_CS + 2(%edi)
12487 + rorl $16,%eax
12488 + movb %al,__BOOT_CS + 4(%edi)
12489 + movb %ah,__BOOT_CS + 7(%edi)
12490 + rorl $16,%eax
12491 +
12492 + ljmp $(__BOOT_CS),$1f
12493 +1:
12494 +
12495 + movl $NR_CPUS,%ecx
12496 + movl $pa(cpu_gdt_table),%edi
12497 + addl $__PAGE_OFFSET,%eax
12498 +1:
12499 + movw %ax,__KERNEL_CS + 2(%edi)
12500 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12501 + rorl $16,%eax
12502 + movb %al,__KERNEL_CS + 4(%edi)
12503 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12504 + movb %ah,__KERNEL_CS + 7(%edi)
12505 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12506 + rorl $16,%eax
12507 + addl $PAGE_SIZE_asm,%edi
12508 + loop 1b
12509 +#endif
12510 +
12511 /*
12512 * Clear BSS first so that there are no surprises...
12513 */
12514 @@ -195,8 +261,11 @@ ENTRY(startup_32)
12515 movl %eax, pa(max_pfn_mapped)
12516
12517 /* Do early initialization of the fixmap area */
12518 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12519 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12520 +#ifdef CONFIG_COMPAT_VDSO
12521 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12522 +#else
12523 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12524 +#endif
12525 #else /* Not PAE */
12526
12527 page_pde_offset = (__PAGE_OFFSET >> 20);
12528 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12529 movl %eax, pa(max_pfn_mapped)
12530
12531 /* Do early initialization of the fixmap area */
12532 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12533 - movl %eax,pa(initial_page_table+0xffc)
12534 +#ifdef CONFIG_COMPAT_VDSO
12535 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12536 +#else
12537 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12538 +#endif
12539 #endif
12540
12541 #ifdef CONFIG_PARAVIRT
12542 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12543 cmpl $num_subarch_entries, %eax
12544 jae bad_subarch
12545
12546 - movl pa(subarch_entries)(,%eax,4), %eax
12547 - subl $__PAGE_OFFSET, %eax
12548 - jmp *%eax
12549 + jmp *pa(subarch_entries)(,%eax,4)
12550
12551 bad_subarch:
12552 WEAK(lguest_entry)
12553 @@ -255,10 +325,10 @@ WEAK(xen_entry)
12554 __INITDATA
12555
12556 subarch_entries:
12557 - .long default_entry /* normal x86/PC */
12558 - .long lguest_entry /* lguest hypervisor */
12559 - .long xen_entry /* Xen hypervisor */
12560 - .long default_entry /* Moorestown MID */
12561 + .long ta(default_entry) /* normal x86/PC */
12562 + .long ta(lguest_entry) /* lguest hypervisor */
12563 + .long ta(xen_entry) /* Xen hypervisor */
12564 + .long ta(default_entry) /* Moorestown MID */
12565 num_subarch_entries = (. - subarch_entries) / 4
12566 .previous
12567 #else
12568 @@ -312,6 +382,7 @@ default_entry:
12569 orl %edx,%eax
12570 movl %eax,%cr4
12571
12572 +#ifdef CONFIG_X86_PAE
12573 testb $X86_CR4_PAE, %al # check if PAE is enabled
12574 jz 6f
12575
12576 @@ -340,6 +411,9 @@ default_entry:
12577 /* Make changes effective */
12578 wrmsr
12579
12580 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12581 +#endif
12582 +
12583 6:
12584
12585 /*
12586 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12587 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12588 movl %eax,%ss # after changing gdt.
12589
12590 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
12591 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12592 movl %eax,%ds
12593 movl %eax,%es
12594
12595 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12596 */
12597 cmpb $0,ready
12598 jne 1f
12599 - movl $gdt_page,%eax
12600 + movl $cpu_gdt_table,%eax
12601 movl $stack_canary,%ecx
12602 +#ifdef CONFIG_SMP
12603 + addl $__per_cpu_load,%ecx
12604 +#endif
12605 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12606 shrl $16, %ecx
12607 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12608 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12609 1:
12610 -#endif
12611 movl $(__KERNEL_STACK_CANARY),%eax
12612 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12613 + movl $(__USER_DS),%eax
12614 +#else
12615 + xorl %eax,%eax
12616 +#endif
12617 movl %eax,%gs
12618
12619 xorl %eax,%eax # Clear LDT
12620 @@ -558,22 +639,22 @@ early_page_fault:
12621 jmp early_fault
12622
12623 early_fault:
12624 - cld
12625 #ifdef CONFIG_PRINTK
12626 + cmpl $1,%ss:early_recursion_flag
12627 + je hlt_loop
12628 + incl %ss:early_recursion_flag
12629 + cld
12630 pusha
12631 movl $(__KERNEL_DS),%eax
12632 movl %eax,%ds
12633 movl %eax,%es
12634 - cmpl $2,early_recursion_flag
12635 - je hlt_loop
12636 - incl early_recursion_flag
12637 movl %cr2,%eax
12638 pushl %eax
12639 pushl %edx /* trapno */
12640 pushl $fault_msg
12641 call printk
12642 +; call dump_stack
12643 #endif
12644 - call dump_stack
12645 hlt_loop:
12646 hlt
12647 jmp hlt_loop
12648 @@ -581,8 +662,11 @@ hlt_loop:
12649 /* This is the default interrupt "handler" :-) */
12650 ALIGN
12651 ignore_int:
12652 - cld
12653 #ifdef CONFIG_PRINTK
12654 + cmpl $2,%ss:early_recursion_flag
12655 + je hlt_loop
12656 + incl %ss:early_recursion_flag
12657 + cld
12658 pushl %eax
12659 pushl %ecx
12660 pushl %edx
12661 @@ -591,9 +675,6 @@ ignore_int:
12662 movl $(__KERNEL_DS),%eax
12663 movl %eax,%ds
12664 movl %eax,%es
12665 - cmpl $2,early_recursion_flag
12666 - je hlt_loop
12667 - incl early_recursion_flag
12668 pushl 16(%esp)
12669 pushl 24(%esp)
12670 pushl 32(%esp)
12671 @@ -622,29 +703,43 @@ ENTRY(initial_code)
12672 /*
12673 * BSS section
12674 */
12675 -__PAGE_ALIGNED_BSS
12676 - .align PAGE_SIZE
12677 #ifdef CONFIG_X86_PAE
12678 +.section .initial_pg_pmd,"a",@progbits
12679 initial_pg_pmd:
12680 .fill 1024*KPMDS,4,0
12681 #else
12682 +.section .initial_page_table,"a",@progbits
12683 ENTRY(initial_page_table)
12684 .fill 1024,4,0
12685 #endif
12686 +.section .initial_pg_fixmap,"a",@progbits
12687 initial_pg_fixmap:
12688 .fill 1024,4,0
12689 +.section .empty_zero_page,"a",@progbits
12690 ENTRY(empty_zero_page)
12691 .fill 4096,1,0
12692 +.section .swapper_pg_dir,"a",@progbits
12693 ENTRY(swapper_pg_dir)
12694 +#ifdef CONFIG_X86_PAE
12695 + .fill 4,8,0
12696 +#else
12697 .fill 1024,4,0
12698 +#endif
12699 +
12700 +/*
12701 + * The IDT has to be page-aligned to simplify the Pentium
12702 + * F0 0F bug workaround.. We have a special link segment
12703 + * for this.
12704 + */
12705 +.section .idt,"a",@progbits
12706 +ENTRY(idt_table)
12707 + .fill 256,8,0
12708
12709 /*
12710 * This starts the data section.
12711 */
12712 #ifdef CONFIG_X86_PAE
12713 -__PAGE_ALIGNED_DATA
12714 - /* Page-aligned for the benefit of paravirt? */
12715 - .align PAGE_SIZE
12716 +.section .initial_page_table,"a",@progbits
12717 ENTRY(initial_page_table)
12718 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12719 # if KPMDS == 3
12720 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12721 # error "Kernel PMDs should be 1, 2 or 3"
12722 # endif
12723 .align PAGE_SIZE /* needs to be page-sized too */
12724 +
12725 +#ifdef CONFIG_PAX_PER_CPU_PGD
12726 +ENTRY(cpu_pgd)
12727 + .rept NR_CPUS
12728 + .fill 4,8,0
12729 + .endr
12730 +#endif
12731 +
12732 #endif
12733
12734 .data
12735 .balign 4
12736 ENTRY(stack_start)
12737 - .long init_thread_union+THREAD_SIZE
12738 + .long init_thread_union+THREAD_SIZE-8
12739 +
12740 +ready: .byte 0
12741
12742 +.section .rodata,"a",@progbits
12743 early_recursion_flag:
12744 .long 0
12745
12746 -ready: .byte 0
12747 -
12748 int_msg:
12749 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12750
12751 @@ -707,7 +811,7 @@ fault_msg:
12752 .word 0 # 32 bit align gdt_desc.address
12753 boot_gdt_descr:
12754 .word __BOOT_DS+7
12755 - .long boot_gdt - __PAGE_OFFSET
12756 + .long pa(boot_gdt)
12757
12758 .word 0 # 32-bit align idt_desc.address
12759 idt_descr:
12760 @@ -718,7 +822,7 @@ idt_descr:
12761 .word 0 # 32 bit align gdt_desc.address
12762 ENTRY(early_gdt_descr)
12763 .word GDT_ENTRIES*8-1
12764 - .long gdt_page /* Overwritten for secondary CPUs */
12765 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
12766
12767 /*
12768 * The boot_gdt must mirror the equivalent in setup.S and is
12769 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12770 .align L1_CACHE_BYTES
12771 ENTRY(boot_gdt)
12772 .fill GDT_ENTRY_BOOT_CS,8,0
12773 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12774 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12775 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12776 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12777 +
12778 + .align PAGE_SIZE_asm
12779 +ENTRY(cpu_gdt_table)
12780 + .rept NR_CPUS
12781 + .quad 0x0000000000000000 /* NULL descriptor */
12782 + .quad 0x0000000000000000 /* 0x0b reserved */
12783 + .quad 0x0000000000000000 /* 0x13 reserved */
12784 + .quad 0x0000000000000000 /* 0x1b reserved */
12785 +
12786 +#ifdef CONFIG_PAX_KERNEXEC
12787 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12788 +#else
12789 + .quad 0x0000000000000000 /* 0x20 unused */
12790 +#endif
12791 +
12792 + .quad 0x0000000000000000 /* 0x28 unused */
12793 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12794 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12795 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12796 + .quad 0x0000000000000000 /* 0x4b reserved */
12797 + .quad 0x0000000000000000 /* 0x53 reserved */
12798 + .quad 0x0000000000000000 /* 0x5b reserved */
12799 +
12800 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12801 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12802 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12803 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12804 +
12805 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12806 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12807 +
12808 + /*
12809 + * Segments used for calling PnP BIOS have byte granularity.
12810 + * The code segments and data segments have fixed 64k limits,
12811 + * the transfer segment sizes are set at run time.
12812 + */
12813 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
12814 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
12815 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
12816 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
12817 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
12818 +
12819 + /*
12820 + * The APM segments have byte granularity and their bases
12821 + * are set at run time. All have 64k limits.
12822 + */
12823 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12824 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12825 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
12826 +
12827 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12828 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12829 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12830 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12831 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12832 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12833 +
12834 + /* Be sure this is zeroed to avoid false validations in Xen */
12835 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12836 + .endr
12837 diff -urNp linux-3.0.4/arch/x86/kernel/head_64.S linux-3.0.4/arch/x86/kernel/head_64.S
12838 --- linux-3.0.4/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
12839 +++ linux-3.0.4/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
12840 @@ -19,6 +19,7 @@
12841 #include <asm/cache.h>
12842 #include <asm/processor-flags.h>
12843 #include <asm/percpu.h>
12844 +#include <asm/cpufeature.h>
12845
12846 #ifdef CONFIG_PARAVIRT
12847 #include <asm/asm-offsets.h>
12848 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12849 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12850 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12851 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12852 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
12853 +L3_VMALLOC_START = pud_index(VMALLOC_START)
12854 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12855 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12856
12857 .text
12858 __HEAD
12859 @@ -85,35 +90,22 @@ startup_64:
12860 */
12861 addq %rbp, init_level4_pgt + 0(%rip)
12862 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12863 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12864 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12865 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12866
12867 addq %rbp, level3_ident_pgt + 0(%rip)
12868 +#ifndef CONFIG_XEN
12869 + addq %rbp, level3_ident_pgt + 8(%rip)
12870 +#endif
12871
12872 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12873 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12874 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12875
12876 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12877 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12878 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12879
12880 - /* Add an Identity mapping if I am above 1G */
12881 - leaq _text(%rip), %rdi
12882 - andq $PMD_PAGE_MASK, %rdi
12883 -
12884 - movq %rdi, %rax
12885 - shrq $PUD_SHIFT, %rax
12886 - andq $(PTRS_PER_PUD - 1), %rax
12887 - jz ident_complete
12888 -
12889 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12890 - leaq level3_ident_pgt(%rip), %rbx
12891 - movq %rdx, 0(%rbx, %rax, 8)
12892 -
12893 - movq %rdi, %rax
12894 - shrq $PMD_SHIFT, %rax
12895 - andq $(PTRS_PER_PMD - 1), %rax
12896 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12897 - leaq level2_spare_pgt(%rip), %rbx
12898 - movq %rdx, 0(%rbx, %rax, 8)
12899 -ident_complete:
12900 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12901 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12902
12903 /*
12904 * Fixup the kernel text+data virtual addresses. Note that
12905 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12906 * after the boot processor executes this code.
12907 */
12908
12909 - /* Enable PAE mode and PGE */
12910 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12911 + /* Enable PAE mode and PSE/PGE */
12912 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12913 movq %rax, %cr4
12914
12915 /* Setup early boot stage 4 level pagetables. */
12916 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12917 movl $MSR_EFER, %ecx
12918 rdmsr
12919 btsl $_EFER_SCE, %eax /* Enable System Call */
12920 - btl $20,%edi /* No Execute supported? */
12921 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12922 jnc 1f
12923 btsl $_EFER_NX, %eax
12924 + leaq init_level4_pgt(%rip), %rdi
12925 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12926 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12927 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12928 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12929 1: wrmsr /* Make changes effective */
12930
12931 /* Setup cr0 */
12932 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12933 bad_address:
12934 jmp bad_address
12935
12936 - .section ".init.text","ax"
12937 + __INIT
12938 #ifdef CONFIG_EARLY_PRINTK
12939 .globl early_idt_handlers
12940 early_idt_handlers:
12941 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12942 #endif /* EARLY_PRINTK */
12943 1: hlt
12944 jmp 1b
12945 + .previous
12946
12947 #ifdef CONFIG_EARLY_PRINTK
12948 + __INITDATA
12949 early_recursion_flag:
12950 .long 0
12951 + .previous
12952
12953 + .section .rodata,"a",@progbits
12954 early_idt_msg:
12955 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12956 early_idt_ripmsg:
12957 .asciz "RIP %s\n"
12958 -#endif /* CONFIG_EARLY_PRINTK */
12959 .previous
12960 +#endif /* CONFIG_EARLY_PRINTK */
12961
12962 + .section .rodata,"a",@progbits
12963 #define NEXT_PAGE(name) \
12964 .balign PAGE_SIZE; \
12965 ENTRY(name)
12966 @@ -338,7 +340,6 @@ ENTRY(name)
12967 i = i + 1 ; \
12968 .endr
12969
12970 - .data
12971 /*
12972 * This default setting generates an ident mapping at address 0x100000
12973 * and a mapping for the kernel that precisely maps virtual address
12974 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12975 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12976 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
12977 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12978 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
12979 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
12980 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
12981 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12982 .org init_level4_pgt + L4_START_KERNEL*8, 0
12983 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
12984 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
12985
12986 +#ifdef CONFIG_PAX_PER_CPU_PGD
12987 +NEXT_PAGE(cpu_pgd)
12988 + .rept NR_CPUS
12989 + .fill 512,8,0
12990 + .endr
12991 +#endif
12992 +
12993 NEXT_PAGE(level3_ident_pgt)
12994 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12995 +#ifdef CONFIG_XEN
12996 .fill 511,8,0
12997 +#else
12998 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
12999 + .fill 510,8,0
13000 +#endif
13001 +
13002 +NEXT_PAGE(level3_vmalloc_pgt)
13003 + .fill 512,8,0
13004 +
13005 +NEXT_PAGE(level3_vmemmap_pgt)
13006 + .fill L3_VMEMMAP_START,8,0
13007 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13008
13009 NEXT_PAGE(level3_kernel_pgt)
13010 .fill L3_START_KERNEL,8,0
13011 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13012 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13013 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13014
13015 +NEXT_PAGE(level2_vmemmap_pgt)
13016 + .fill 512,8,0
13017 +
13018 NEXT_PAGE(level2_fixmap_pgt)
13019 - .fill 506,8,0
13020 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13021 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13022 - .fill 5,8,0
13023 + .fill 507,8,0
13024 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13025 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13026 + .fill 4,8,0
13027
13028 -NEXT_PAGE(level1_fixmap_pgt)
13029 +NEXT_PAGE(level1_vsyscall_pgt)
13030 .fill 512,8,0
13031
13032 -NEXT_PAGE(level2_ident_pgt)
13033 - /* Since I easily can, map the first 1G.
13034 + /* Since I easily can, map the first 2G.
13035 * Don't set NX because code runs from these pages.
13036 */
13037 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13038 +NEXT_PAGE(level2_ident_pgt)
13039 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13040
13041 NEXT_PAGE(level2_kernel_pgt)
13042 /*
13043 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13044 * If you want to increase this then increase MODULES_VADDR
13045 * too.)
13046 */
13047 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13048 - KERNEL_IMAGE_SIZE/PMD_SIZE)
13049 -
13050 -NEXT_PAGE(level2_spare_pgt)
13051 - .fill 512, 8, 0
13052 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13053
13054 #undef PMDS
13055 #undef NEXT_PAGE
13056
13057 - .data
13058 + .align PAGE_SIZE
13059 +ENTRY(cpu_gdt_table)
13060 + .rept NR_CPUS
13061 + .quad 0x0000000000000000 /* NULL descriptor */
13062 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13063 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
13064 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
13065 + .quad 0x00cffb000000ffff /* __USER32_CS */
13066 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13067 + .quad 0x00affb000000ffff /* __USER_CS */
13068 +
13069 +#ifdef CONFIG_PAX_KERNEXEC
13070 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13071 +#else
13072 + .quad 0x0 /* unused */
13073 +#endif
13074 +
13075 + .quad 0,0 /* TSS */
13076 + .quad 0,0 /* LDT */
13077 + .quad 0,0,0 /* three TLS descriptors */
13078 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
13079 + /* asm/segment.h:GDT_ENTRIES must match this */
13080 +
13081 + /* zero the remaining page */
13082 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13083 + .endr
13084 +
13085 .align 16
13086 .globl early_gdt_descr
13087 early_gdt_descr:
13088 .word GDT_ENTRIES*8-1
13089 early_gdt_descr_base:
13090 - .quad INIT_PER_CPU_VAR(gdt_page)
13091 + .quad cpu_gdt_table
13092
13093 ENTRY(phys_base)
13094 /* This must match the first entry in level2_kernel_pgt */
13095 .quad 0x0000000000000000
13096
13097 #include "../../x86/xen/xen-head.S"
13098 -
13099 - .section .bss, "aw", @nobits
13100 +
13101 + .section .rodata,"a",@progbits
13102 .align L1_CACHE_BYTES
13103 ENTRY(idt_table)
13104 - .skip IDT_ENTRIES * 16
13105 + .fill 512,8,0
13106
13107 __PAGE_ALIGNED_BSS
13108 .align PAGE_SIZE
13109 diff -urNp linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c
13110 --- linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13111 +++ linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13112 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13113 EXPORT_SYMBOL(cmpxchg8b_emu);
13114 #endif
13115
13116 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
13117 +
13118 /* Networking helper routines. */
13119 EXPORT_SYMBOL(csum_partial_copy_generic);
13120 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13121 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13122
13123 EXPORT_SYMBOL(__get_user_1);
13124 EXPORT_SYMBOL(__get_user_2);
13125 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13126
13127 EXPORT_SYMBOL(csum_partial);
13128 EXPORT_SYMBOL(empty_zero_page);
13129 +
13130 +#ifdef CONFIG_PAX_KERNEXEC
13131 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13132 +#endif
13133 diff -urNp linux-3.0.4/arch/x86/kernel/i8259.c linux-3.0.4/arch/x86/kernel/i8259.c
13134 --- linux-3.0.4/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13135 +++ linux-3.0.4/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13136 @@ -210,7 +210,7 @@ spurious_8259A_irq:
13137 "spurious 8259A interrupt: IRQ%d.\n", irq);
13138 spurious_irq_mask |= irqmask;
13139 }
13140 - atomic_inc(&irq_err_count);
13141 + atomic_inc_unchecked(&irq_err_count);
13142 /*
13143 * Theoretically we do not have to handle this IRQ,
13144 * but in Linux this does not cause problems and is
13145 diff -urNp linux-3.0.4/arch/x86/kernel/init_task.c linux-3.0.4/arch/x86/kernel/init_task.c
13146 --- linux-3.0.4/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13147 +++ linux-3.0.4/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13148 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13149 * way process stacks are handled. This is done by having a special
13150 * "init_task" linker map entry..
13151 */
13152 -union thread_union init_thread_union __init_task_data =
13153 - { INIT_THREAD_INFO(init_task) };
13154 +union thread_union init_thread_union __init_task_data;
13155
13156 /*
13157 * Initial task structure.
13158 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13159 * section. Since TSS's are completely CPU-local, we want them
13160 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13161 */
13162 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13163 -
13164 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13165 +EXPORT_SYMBOL(init_tss);
13166 diff -urNp linux-3.0.4/arch/x86/kernel/ioport.c linux-3.0.4/arch/x86/kernel/ioport.c
13167 --- linux-3.0.4/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13168 +++ linux-3.0.4/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13169 @@ -6,6 +6,7 @@
13170 #include <linux/sched.h>
13171 #include <linux/kernel.h>
13172 #include <linux/capability.h>
13173 +#include <linux/security.h>
13174 #include <linux/errno.h>
13175 #include <linux/types.h>
13176 #include <linux/ioport.h>
13177 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13178
13179 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13180 return -EINVAL;
13181 +#ifdef CONFIG_GRKERNSEC_IO
13182 + if (turn_on && grsec_disable_privio) {
13183 + gr_handle_ioperm();
13184 + return -EPERM;
13185 + }
13186 +#endif
13187 if (turn_on && !capable(CAP_SYS_RAWIO))
13188 return -EPERM;
13189
13190 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13191 * because the ->io_bitmap_max value must match the bitmap
13192 * contents:
13193 */
13194 - tss = &per_cpu(init_tss, get_cpu());
13195 + tss = init_tss + get_cpu();
13196
13197 if (turn_on)
13198 bitmap_clear(t->io_bitmap_ptr, from, num);
13199 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13200 return -EINVAL;
13201 /* Trying to gain more privileges? */
13202 if (level > old) {
13203 +#ifdef CONFIG_GRKERNSEC_IO
13204 + if (grsec_disable_privio) {
13205 + gr_handle_iopl();
13206 + return -EPERM;
13207 + }
13208 +#endif
13209 if (!capable(CAP_SYS_RAWIO))
13210 return -EPERM;
13211 }
13212 diff -urNp linux-3.0.4/arch/x86/kernel/irq_32.c linux-3.0.4/arch/x86/kernel/irq_32.c
13213 --- linux-3.0.4/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13214 +++ linux-3.0.4/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13215 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13216 __asm__ __volatile__("andl %%esp,%0" :
13217 "=r" (sp) : "0" (THREAD_SIZE - 1));
13218
13219 - return sp < (sizeof(struct thread_info) + STACK_WARN);
13220 + return sp < STACK_WARN;
13221 }
13222
13223 static void print_stack_overflow(void)
13224 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13225 * per-CPU IRQ handling contexts (thread information and stack)
13226 */
13227 union irq_ctx {
13228 - struct thread_info tinfo;
13229 - u32 stack[THREAD_SIZE/sizeof(u32)];
13230 + unsigned long previous_esp;
13231 + u32 stack[THREAD_SIZE/sizeof(u32)];
13232 } __attribute__((aligned(THREAD_SIZE)));
13233
13234 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13235 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13236 static inline int
13237 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13238 {
13239 - union irq_ctx *curctx, *irqctx;
13240 + union irq_ctx *irqctx;
13241 u32 *isp, arg1, arg2;
13242
13243 - curctx = (union irq_ctx *) current_thread_info();
13244 irqctx = __this_cpu_read(hardirq_ctx);
13245
13246 /*
13247 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13248 * handler) we can't do that and just have to keep using the
13249 * current stack (which is the irq stack already after all)
13250 */
13251 - if (unlikely(curctx == irqctx))
13252 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13253 return 0;
13254
13255 /* build the stack frame on the IRQ stack */
13256 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13257 - irqctx->tinfo.task = curctx->tinfo.task;
13258 - irqctx->tinfo.previous_esp = current_stack_pointer;
13259 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13260 + irqctx->previous_esp = current_stack_pointer;
13261
13262 - /*
13263 - * Copy the softirq bits in preempt_count so that the
13264 - * softirq checks work in the hardirq context.
13265 - */
13266 - irqctx->tinfo.preempt_count =
13267 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13268 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13269 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13270 + __set_fs(MAKE_MM_SEG(0));
13271 +#endif
13272
13273 if (unlikely(overflow))
13274 call_on_stack(print_stack_overflow, isp);
13275 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13276 : "0" (irq), "1" (desc), "2" (isp),
13277 "D" (desc->handle_irq)
13278 : "memory", "cc", "ecx");
13279 +
13280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13281 + __set_fs(current_thread_info()->addr_limit);
13282 +#endif
13283 +
13284 return 1;
13285 }
13286
13287 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13288 */
13289 void __cpuinit irq_ctx_init(int cpu)
13290 {
13291 - union irq_ctx *irqctx;
13292 -
13293 if (per_cpu(hardirq_ctx, cpu))
13294 return;
13295
13296 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13297 - THREAD_FLAGS,
13298 - THREAD_ORDER));
13299 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13300 - irqctx->tinfo.cpu = cpu;
13301 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13302 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13303 -
13304 - per_cpu(hardirq_ctx, cpu) = irqctx;
13305 -
13306 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13307 - THREAD_FLAGS,
13308 - THREAD_ORDER));
13309 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13310 - irqctx->tinfo.cpu = cpu;
13311 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13312 -
13313 - per_cpu(softirq_ctx, cpu) = irqctx;
13314 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13315 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13316
13317 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13318 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13319 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13320 asmlinkage void do_softirq(void)
13321 {
13322 unsigned long flags;
13323 - struct thread_info *curctx;
13324 union irq_ctx *irqctx;
13325 u32 *isp;
13326
13327 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13328 local_irq_save(flags);
13329
13330 if (local_softirq_pending()) {
13331 - curctx = current_thread_info();
13332 irqctx = __this_cpu_read(softirq_ctx);
13333 - irqctx->tinfo.task = curctx->task;
13334 - irqctx->tinfo.previous_esp = current_stack_pointer;
13335 + irqctx->previous_esp = current_stack_pointer;
13336
13337 /* build the stack frame on the softirq stack */
13338 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13339 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13340 +
13341 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13342 + __set_fs(MAKE_MM_SEG(0));
13343 +#endif
13344
13345 call_on_stack(__do_softirq, isp);
13346 +
13347 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13348 + __set_fs(current_thread_info()->addr_limit);
13349 +#endif
13350 +
13351 /*
13352 * Shouldn't happen, we returned above if in_interrupt():
13353 */
13354 diff -urNp linux-3.0.4/arch/x86/kernel/irq.c linux-3.0.4/arch/x86/kernel/irq.c
13355 --- linux-3.0.4/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13356 +++ linux-3.0.4/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13357 @@ -17,7 +17,7 @@
13358 #include <asm/mce.h>
13359 #include <asm/hw_irq.h>
13360
13361 -atomic_t irq_err_count;
13362 +atomic_unchecked_t irq_err_count;
13363
13364 /* Function pointer for generic interrupt vector handling */
13365 void (*x86_platform_ipi_callback)(void) = NULL;
13366 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13367 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13368 seq_printf(p, " Machine check polls\n");
13369 #endif
13370 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13371 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13372 #if defined(CONFIG_X86_IO_APIC)
13373 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13374 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13375 #endif
13376 return 0;
13377 }
13378 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13379
13380 u64 arch_irq_stat(void)
13381 {
13382 - u64 sum = atomic_read(&irq_err_count);
13383 + u64 sum = atomic_read_unchecked(&irq_err_count);
13384
13385 #ifdef CONFIG_X86_IO_APIC
13386 - sum += atomic_read(&irq_mis_count);
13387 + sum += atomic_read_unchecked(&irq_mis_count);
13388 #endif
13389 return sum;
13390 }
13391 diff -urNp linux-3.0.4/arch/x86/kernel/kgdb.c linux-3.0.4/arch/x86/kernel/kgdb.c
13392 --- linux-3.0.4/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13393 +++ linux-3.0.4/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13394 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13395 #ifdef CONFIG_X86_32
13396 switch (regno) {
13397 case GDB_SS:
13398 - if (!user_mode_vm(regs))
13399 + if (!user_mode(regs))
13400 *(unsigned long *)mem = __KERNEL_DS;
13401 break;
13402 case GDB_SP:
13403 - if (!user_mode_vm(regs))
13404 + if (!user_mode(regs))
13405 *(unsigned long *)mem = kernel_stack_pointer(regs);
13406 break;
13407 case GDB_GS:
13408 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13409 case 'k':
13410 /* clear the trace bit */
13411 linux_regs->flags &= ~X86_EFLAGS_TF;
13412 - atomic_set(&kgdb_cpu_doing_single_step, -1);
13413 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13414
13415 /* set the trace bit if we're stepping */
13416 if (remcomInBuffer[0] == 's') {
13417 linux_regs->flags |= X86_EFLAGS_TF;
13418 - atomic_set(&kgdb_cpu_doing_single_step,
13419 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13420 raw_smp_processor_id());
13421 }
13422
13423 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13424 return NOTIFY_DONE;
13425
13426 case DIE_DEBUG:
13427 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13428 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13429 if (user_mode(regs))
13430 return single_step_cont(regs, args);
13431 break;
13432 diff -urNp linux-3.0.4/arch/x86/kernel/kprobes.c linux-3.0.4/arch/x86/kernel/kprobes.c
13433 --- linux-3.0.4/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13434 +++ linux-3.0.4/arch/x86/kernel/kprobes.c 2011-08-23 21:47:55.000000000 -0400
13435 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13436 } __attribute__((packed)) *insn;
13437
13438 insn = (struct __arch_relative_insn *)from;
13439 +
13440 + pax_open_kernel();
13441 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13442 insn->op = op;
13443 + pax_close_kernel();
13444 }
13445
13446 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13447 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13448 kprobe_opcode_t opcode;
13449 kprobe_opcode_t *orig_opcodes = opcodes;
13450
13451 - if (search_exception_tables((unsigned long)opcodes))
13452 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13453 return 0; /* Page fault may occur on this address. */
13454
13455 retry:
13456 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13457 }
13458 }
13459 insn_get_length(&insn);
13460 + pax_open_kernel();
13461 memcpy(dest, insn.kaddr, insn.length);
13462 + pax_close_kernel();
13463
13464 #ifdef CONFIG_X86_64
13465 if (insn_rip_relative(&insn)) {
13466 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13467 (u8 *) dest;
13468 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13469 disp = (u8 *) dest + insn_offset_displacement(&insn);
13470 + pax_open_kernel();
13471 *(s32 *) disp = (s32) newdisp;
13472 + pax_close_kernel();
13473 }
13474 #endif
13475 return insn.length;
13476 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13477 */
13478 __copy_instruction(p->ainsn.insn, p->addr, 0);
13479
13480 - if (can_boost(p->addr))
13481 + if (can_boost(ktla_ktva(p->addr)))
13482 p->ainsn.boostable = 0;
13483 else
13484 p->ainsn.boostable = -1;
13485
13486 - p->opcode = *p->addr;
13487 + p->opcode = *(ktla_ktva(p->addr));
13488 }
13489
13490 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13491 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13492 * nor set current_kprobe, because it doesn't use single
13493 * stepping.
13494 */
13495 - regs->ip = (unsigned long)p->ainsn.insn;
13496 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13497 preempt_enable_no_resched();
13498 return;
13499 }
13500 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13501 if (p->opcode == BREAKPOINT_INSTRUCTION)
13502 regs->ip = (unsigned long)p->addr;
13503 else
13504 - regs->ip = (unsigned long)p->ainsn.insn;
13505 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13506 }
13507
13508 /*
13509 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13510 setup_singlestep(p, regs, kcb, 0);
13511 return 1;
13512 }
13513 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
13514 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13515 /*
13516 * The breakpoint instruction was removed right
13517 * after we hit it. Another cpu has removed
13518 @@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13519 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13520 {
13521 unsigned long *tos = stack_addr(regs);
13522 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13523 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13524 unsigned long orig_ip = (unsigned long)p->addr;
13525 kprobe_opcode_t *insn = p->ainsn.insn;
13526
13527 @@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13528 struct die_args *args = data;
13529 int ret = NOTIFY_DONE;
13530
13531 - if (args->regs && user_mode_vm(args->regs))
13532 + if (args->regs && user_mode(args->regs))
13533 return ret;
13534
13535 switch (val) {
13536 @@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13537 * Verify if the address gap is in 2GB range, because this uses
13538 * a relative jump.
13539 */
13540 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13541 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13542 if (abs(rel) > 0x7fffffff)
13543 return -ERANGE;
13544
13545 @@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13546 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13547
13548 /* Set probe function call */
13549 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13550 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13551
13552 /* Set returning jmp instruction at the tail of out-of-line buffer */
13553 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13554 - (u8 *)op->kp.addr + op->optinsn.size);
13555 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13556
13557 flush_icache_range((unsigned long) buf,
13558 (unsigned long) buf + TMPL_END_IDX +
13559 @@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13560 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13561
13562 /* Backup instructions which will be replaced by jump address */
13563 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13564 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13565 RELATIVE_ADDR_SIZE);
13566
13567 insn_buf[0] = RELATIVEJUMP_OPCODE;
13568 diff -urNp linux-3.0.4/arch/x86/kernel/kvm.c linux-3.0.4/arch/x86/kernel/kvm.c
13569 --- linux-3.0.4/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13570 +++ linux-3.0.4/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13571 @@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13572 pv_mmu_ops.set_pud = kvm_set_pud;
13573 #if PAGETABLE_LEVELS == 4
13574 pv_mmu_ops.set_pgd = kvm_set_pgd;
13575 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13576 #endif
13577 #endif
13578 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13579 diff -urNp linux-3.0.4/arch/x86/kernel/ldt.c linux-3.0.4/arch/x86/kernel/ldt.c
13580 --- linux-3.0.4/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13581 +++ linux-3.0.4/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13582 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13583 if (reload) {
13584 #ifdef CONFIG_SMP
13585 preempt_disable();
13586 - load_LDT(pc);
13587 + load_LDT_nolock(pc);
13588 if (!cpumask_equal(mm_cpumask(current->mm),
13589 cpumask_of(smp_processor_id())))
13590 smp_call_function(flush_ldt, current->mm, 1);
13591 preempt_enable();
13592 #else
13593 - load_LDT(pc);
13594 + load_LDT_nolock(pc);
13595 #endif
13596 }
13597 if (oldsize) {
13598 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13599 return err;
13600
13601 for (i = 0; i < old->size; i++)
13602 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13603 + write_ldt_entry(new->ldt, i, old->ldt + i);
13604 return 0;
13605 }
13606
13607 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13608 retval = copy_ldt(&mm->context, &old_mm->context);
13609 mutex_unlock(&old_mm->context.lock);
13610 }
13611 +
13612 + if (tsk == current) {
13613 + mm->context.vdso = 0;
13614 +
13615 +#ifdef CONFIG_X86_32
13616 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13617 + mm->context.user_cs_base = 0UL;
13618 + mm->context.user_cs_limit = ~0UL;
13619 +
13620 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13621 + cpus_clear(mm->context.cpu_user_cs_mask);
13622 +#endif
13623 +
13624 +#endif
13625 +#endif
13626 +
13627 + }
13628 +
13629 return retval;
13630 }
13631
13632 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13633 }
13634 }
13635
13636 +#ifdef CONFIG_PAX_SEGMEXEC
13637 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13638 + error = -EINVAL;
13639 + goto out_unlock;
13640 + }
13641 +#endif
13642 +
13643 fill_ldt(&ldt, &ldt_info);
13644 if (oldmode)
13645 ldt.avl = 0;
13646 diff -urNp linux-3.0.4/arch/x86/kernel/machine_kexec_32.c linux-3.0.4/arch/x86/kernel/machine_kexec_32.c
13647 --- linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
13648 +++ linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
13649 @@ -27,7 +27,7 @@
13650 #include <asm/cacheflush.h>
13651 #include <asm/debugreg.h>
13652
13653 -static void set_idt(void *newidt, __u16 limit)
13654 +static void set_idt(struct desc_struct *newidt, __u16 limit)
13655 {
13656 struct desc_ptr curidt;
13657
13658 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13659 }
13660
13661
13662 -static void set_gdt(void *newgdt, __u16 limit)
13663 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13664 {
13665 struct desc_ptr curgdt;
13666
13667 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13668 }
13669
13670 control_page = page_address(image->control_code_page);
13671 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13672 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13673
13674 relocate_kernel_ptr = control_page;
13675 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13676 diff -urNp linux-3.0.4/arch/x86/kernel/microcode_intel.c linux-3.0.4/arch/x86/kernel/microcode_intel.c
13677 --- linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
13678 +++ linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-08-23 21:47:55.000000000 -0400
13679 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13680
13681 static int get_ucode_user(void *to, const void *from, size_t n)
13682 {
13683 - return copy_from_user(to, from, n);
13684 + return copy_from_user(to, (__force const void __user *)from, n);
13685 }
13686
13687 static enum ucode_state
13688 request_microcode_user(int cpu, const void __user *buf, size_t size)
13689 {
13690 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13691 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13692 }
13693
13694 static void microcode_fini_cpu(int cpu)
13695 diff -urNp linux-3.0.4/arch/x86/kernel/module.c linux-3.0.4/arch/x86/kernel/module.c
13696 --- linux-3.0.4/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
13697 +++ linux-3.0.4/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
13698 @@ -36,21 +36,66 @@
13699 #define DEBUGP(fmt...)
13700 #endif
13701
13702 -void *module_alloc(unsigned long size)
13703 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13704 {
13705 if (PAGE_ALIGN(size) > MODULES_LEN)
13706 return NULL;
13707 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13708 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13709 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13710 -1, __builtin_return_address(0));
13711 }
13712
13713 +void *module_alloc(unsigned long size)
13714 +{
13715 +
13716 +#ifdef CONFIG_PAX_KERNEXEC
13717 + return __module_alloc(size, PAGE_KERNEL);
13718 +#else
13719 + return __module_alloc(size, PAGE_KERNEL_EXEC);
13720 +#endif
13721 +
13722 +}
13723 +
13724 /* Free memory returned from module_alloc */
13725 void module_free(struct module *mod, void *module_region)
13726 {
13727 vfree(module_region);
13728 }
13729
13730 +#ifdef CONFIG_PAX_KERNEXEC
13731 +#ifdef CONFIG_X86_32
13732 +void *module_alloc_exec(unsigned long size)
13733 +{
13734 + struct vm_struct *area;
13735 +
13736 + if (size == 0)
13737 + return NULL;
13738 +
13739 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13740 + return area ? area->addr : NULL;
13741 +}
13742 +EXPORT_SYMBOL(module_alloc_exec);
13743 +
13744 +void module_free_exec(struct module *mod, void *module_region)
13745 +{
13746 + vunmap(module_region);
13747 +}
13748 +EXPORT_SYMBOL(module_free_exec);
13749 +#else
13750 +void module_free_exec(struct module *mod, void *module_region)
13751 +{
13752 + module_free(mod, module_region);
13753 +}
13754 +EXPORT_SYMBOL(module_free_exec);
13755 +
13756 +void *module_alloc_exec(unsigned long size)
13757 +{
13758 + return __module_alloc(size, PAGE_KERNEL_RX);
13759 +}
13760 +EXPORT_SYMBOL(module_alloc_exec);
13761 +#endif
13762 +#endif
13763 +
13764 /* We don't need anything special. */
13765 int module_frob_arch_sections(Elf_Ehdr *hdr,
13766 Elf_Shdr *sechdrs,
13767 @@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13768 unsigned int i;
13769 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13770 Elf32_Sym *sym;
13771 - uint32_t *location;
13772 + uint32_t *plocation, location;
13773
13774 DEBUGP("Applying relocate section %u to %u\n", relsec,
13775 sechdrs[relsec].sh_info);
13776 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13777 /* This is where to make the change */
13778 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13779 - + rel[i].r_offset;
13780 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13781 + location = (uint32_t)plocation;
13782 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13783 + plocation = ktla_ktva((void *)plocation);
13784 /* This is the symbol it is referring to. Note that all
13785 undefined symbols have been resolved. */
13786 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13787 @@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13788 switch (ELF32_R_TYPE(rel[i].r_info)) {
13789 case R_386_32:
13790 /* We add the value into the location given */
13791 - *location += sym->st_value;
13792 + pax_open_kernel();
13793 + *plocation += sym->st_value;
13794 + pax_close_kernel();
13795 break;
13796 case R_386_PC32:
13797 /* Add the value, subtract its postition */
13798 - *location += sym->st_value - (uint32_t)location;
13799 + pax_open_kernel();
13800 + *plocation += sym->st_value - location;
13801 + pax_close_kernel();
13802 break;
13803 default:
13804 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13805 @@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13806 case R_X86_64_NONE:
13807 break;
13808 case R_X86_64_64:
13809 + pax_open_kernel();
13810 *(u64 *)loc = val;
13811 + pax_close_kernel();
13812 break;
13813 case R_X86_64_32:
13814 + pax_open_kernel();
13815 *(u32 *)loc = val;
13816 + pax_close_kernel();
13817 if (val != *(u32 *)loc)
13818 goto overflow;
13819 break;
13820 case R_X86_64_32S:
13821 + pax_open_kernel();
13822 *(s32 *)loc = val;
13823 + pax_close_kernel();
13824 if ((s64)val != *(s32 *)loc)
13825 goto overflow;
13826 break;
13827 case R_X86_64_PC32:
13828 val -= (u64)loc;
13829 + pax_open_kernel();
13830 *(u32 *)loc = val;
13831 + pax_close_kernel();
13832 +
13833 #if 0
13834 if ((s64)val != *(s32 *)loc)
13835 goto overflow;
13836 diff -urNp linux-3.0.4/arch/x86/kernel/paravirt.c linux-3.0.4/arch/x86/kernel/paravirt.c
13837 --- linux-3.0.4/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
13838 +++ linux-3.0.4/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
13839 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13840 {
13841 return x;
13842 }
13843 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13844 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13845 +#endif
13846
13847 void __init default_banner(void)
13848 {
13849 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13850 * corresponding structure. */
13851 static void *get_call_destination(u8 type)
13852 {
13853 - struct paravirt_patch_template tmpl = {
13854 + const struct paravirt_patch_template tmpl = {
13855 .pv_init_ops = pv_init_ops,
13856 .pv_time_ops = pv_time_ops,
13857 .pv_cpu_ops = pv_cpu_ops,
13858 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13859 .pv_lock_ops = pv_lock_ops,
13860 #endif
13861 };
13862 +
13863 + pax_track_stack();
13864 +
13865 return *((void **)&tmpl + type);
13866 }
13867
13868 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13869 if (opfunc == NULL)
13870 /* If there's no function, patch it with a ud2a (BUG) */
13871 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13872 - else if (opfunc == _paravirt_nop)
13873 + else if (opfunc == (void *)_paravirt_nop)
13874 /* If the operation is a nop, then nop the callsite */
13875 ret = paravirt_patch_nop();
13876
13877 /* identity functions just return their single argument */
13878 - else if (opfunc == _paravirt_ident_32)
13879 + else if (opfunc == (void *)_paravirt_ident_32)
13880 ret = paravirt_patch_ident_32(insnbuf, len);
13881 - else if (opfunc == _paravirt_ident_64)
13882 + else if (opfunc == (void *)_paravirt_ident_64)
13883 ret = paravirt_patch_ident_64(insnbuf, len);
13884 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13885 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13886 + ret = paravirt_patch_ident_64(insnbuf, len);
13887 +#endif
13888
13889 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13890 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13891 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13892 if (insn_len > len || start == NULL)
13893 insn_len = len;
13894 else
13895 - memcpy(insnbuf, start, insn_len);
13896 + memcpy(insnbuf, ktla_ktva(start), insn_len);
13897
13898 return insn_len;
13899 }
13900 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13901 preempt_enable();
13902 }
13903
13904 -struct pv_info pv_info = {
13905 +struct pv_info pv_info __read_only = {
13906 .name = "bare hardware",
13907 .paravirt_enabled = 0,
13908 .kernel_rpl = 0,
13909 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13910 };
13911
13912 -struct pv_init_ops pv_init_ops = {
13913 +struct pv_init_ops pv_init_ops __read_only = {
13914 .patch = native_patch,
13915 };
13916
13917 -struct pv_time_ops pv_time_ops = {
13918 +struct pv_time_ops pv_time_ops __read_only = {
13919 .sched_clock = native_sched_clock,
13920 };
13921
13922 -struct pv_irq_ops pv_irq_ops = {
13923 +struct pv_irq_ops pv_irq_ops __read_only = {
13924 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13925 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13926 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13927 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13928 #endif
13929 };
13930
13931 -struct pv_cpu_ops pv_cpu_ops = {
13932 +struct pv_cpu_ops pv_cpu_ops __read_only = {
13933 .cpuid = native_cpuid,
13934 .get_debugreg = native_get_debugreg,
13935 .set_debugreg = native_set_debugreg,
13936 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13937 .end_context_switch = paravirt_nop,
13938 };
13939
13940 -struct pv_apic_ops pv_apic_ops = {
13941 +struct pv_apic_ops pv_apic_ops __read_only = {
13942 #ifdef CONFIG_X86_LOCAL_APIC
13943 .startup_ipi_hook = paravirt_nop,
13944 #endif
13945 };
13946
13947 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13948 +#ifdef CONFIG_X86_32
13949 +#ifdef CONFIG_X86_PAE
13950 +/* 64-bit pagetable entries */
13951 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13952 +#else
13953 /* 32-bit pagetable entries */
13954 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13955 +#endif
13956 #else
13957 /* 64-bit pagetable entries */
13958 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13959 #endif
13960
13961 -struct pv_mmu_ops pv_mmu_ops = {
13962 +struct pv_mmu_ops pv_mmu_ops __read_only = {
13963
13964 .read_cr2 = native_read_cr2,
13965 .write_cr2 = native_write_cr2,
13966 @@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
13967 .make_pud = PTE_IDENT,
13968
13969 .set_pgd = native_set_pgd,
13970 + .set_pgd_batched = native_set_pgd_batched,
13971 #endif
13972 #endif /* PAGETABLE_LEVELS >= 3 */
13973
13974 @@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13975 },
13976
13977 .set_fixmap = native_set_fixmap,
13978 +
13979 +#ifdef CONFIG_PAX_KERNEXEC
13980 + .pax_open_kernel = native_pax_open_kernel,
13981 + .pax_close_kernel = native_pax_close_kernel,
13982 +#endif
13983 +
13984 };
13985
13986 EXPORT_SYMBOL_GPL(pv_time_ops);
13987 diff -urNp linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c
13988 --- linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
13989 +++ linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
13990 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
13991 arch_spin_lock(lock);
13992 }
13993
13994 -struct pv_lock_ops pv_lock_ops = {
13995 +struct pv_lock_ops pv_lock_ops __read_only = {
13996 #ifdef CONFIG_SMP
13997 .spin_is_locked = __ticket_spin_is_locked,
13998 .spin_is_contended = __ticket_spin_is_contended,
13999 diff -urNp linux-3.0.4/arch/x86/kernel/pci-iommu_table.c linux-3.0.4/arch/x86/kernel/pci-iommu_table.c
14000 --- linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14001 +++ linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14002 @@ -2,7 +2,7 @@
14003 #include <asm/iommu_table.h>
14004 #include <linux/string.h>
14005 #include <linux/kallsyms.h>
14006 -
14007 +#include <linux/sched.h>
14008
14009 #define DEBUG 1
14010
14011 @@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14012 {
14013 struct iommu_table_entry *p, *q, *x;
14014
14015 + pax_track_stack();
14016 +
14017 /* Simple cyclic dependency checker. */
14018 for (p = start; p < finish; p++) {
14019 q = find_dependents_of(start, finish, p);
14020 diff -urNp linux-3.0.4/arch/x86/kernel/process_32.c linux-3.0.4/arch/x86/kernel/process_32.c
14021 --- linux-3.0.4/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14022 +++ linux-3.0.4/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14023 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14024 unsigned long thread_saved_pc(struct task_struct *tsk)
14025 {
14026 return ((unsigned long *)tsk->thread.sp)[3];
14027 +//XXX return tsk->thread.eip;
14028 }
14029
14030 #ifndef CONFIG_SMP
14031 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14032 unsigned long sp;
14033 unsigned short ss, gs;
14034
14035 - if (user_mode_vm(regs)) {
14036 + if (user_mode(regs)) {
14037 sp = regs->sp;
14038 ss = regs->ss & 0xffff;
14039 - gs = get_user_gs(regs);
14040 } else {
14041 sp = kernel_stack_pointer(regs);
14042 savesegment(ss, ss);
14043 - savesegment(gs, gs);
14044 }
14045 + gs = get_user_gs(regs);
14046
14047 show_regs_common();
14048
14049 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14050 struct task_struct *tsk;
14051 int err;
14052
14053 - childregs = task_pt_regs(p);
14054 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14055 *childregs = *regs;
14056 childregs->ax = 0;
14057 childregs->sp = sp;
14058
14059 p->thread.sp = (unsigned long) childregs;
14060 p->thread.sp0 = (unsigned long) (childregs+1);
14061 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14062
14063 p->thread.ip = (unsigned long) ret_from_fork;
14064
14065 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14066 struct thread_struct *prev = &prev_p->thread,
14067 *next = &next_p->thread;
14068 int cpu = smp_processor_id();
14069 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14070 + struct tss_struct *tss = init_tss + cpu;
14071 bool preload_fpu;
14072
14073 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14074 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14075 */
14076 lazy_save_gs(prev->gs);
14077
14078 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14079 + __set_fs(task_thread_info(next_p)->addr_limit);
14080 +#endif
14081 +
14082 /*
14083 * Load the per-thread Thread-Local Storage descriptor.
14084 */
14085 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14086 */
14087 arch_end_context_switch(next_p);
14088
14089 + percpu_write(current_task, next_p);
14090 + percpu_write(current_tinfo, &next_p->tinfo);
14091 +
14092 if (preload_fpu)
14093 __math_state_restore();
14094
14095 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14096 if (prev->gs | next->gs)
14097 lazy_load_gs(next->gs);
14098
14099 - percpu_write(current_task, next_p);
14100 -
14101 return prev_p;
14102 }
14103
14104 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14105 } while (count++ < 16);
14106 return 0;
14107 }
14108 -
14109 diff -urNp linux-3.0.4/arch/x86/kernel/process_64.c linux-3.0.4/arch/x86/kernel/process_64.c
14110 --- linux-3.0.4/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14111 +++ linux-3.0.4/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14112 @@ -87,7 +87,7 @@ static void __exit_idle(void)
14113 void exit_idle(void)
14114 {
14115 /* idle loop has pid 0 */
14116 - if (current->pid)
14117 + if (task_pid_nr(current))
14118 return;
14119 __exit_idle();
14120 }
14121 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14122 struct pt_regs *childregs;
14123 struct task_struct *me = current;
14124
14125 - childregs = ((struct pt_regs *)
14126 - (THREAD_SIZE + task_stack_page(p))) - 1;
14127 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14128 *childregs = *regs;
14129
14130 childregs->ax = 0;
14131 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14132 p->thread.sp = (unsigned long) childregs;
14133 p->thread.sp0 = (unsigned long) (childregs+1);
14134 p->thread.usersp = me->thread.usersp;
14135 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14136
14137 set_tsk_thread_flag(p, TIF_FORK);
14138
14139 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14140 struct thread_struct *prev = &prev_p->thread;
14141 struct thread_struct *next = &next_p->thread;
14142 int cpu = smp_processor_id();
14143 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14144 + struct tss_struct *tss = init_tss + cpu;
14145 unsigned fsindex, gsindex;
14146 bool preload_fpu;
14147
14148 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14149 prev->usersp = percpu_read(old_rsp);
14150 percpu_write(old_rsp, next->usersp);
14151 percpu_write(current_task, next_p);
14152 + percpu_write(current_tinfo, &next_p->tinfo);
14153
14154 - percpu_write(kernel_stack,
14155 - (unsigned long)task_stack_page(next_p) +
14156 - THREAD_SIZE - KERNEL_STACK_OFFSET);
14157 + percpu_write(kernel_stack, next->sp0);
14158
14159 /*
14160 * Now maybe reload the debug registers and handle I/O bitmaps
14161 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14162 if (!p || p == current || p->state == TASK_RUNNING)
14163 return 0;
14164 stack = (unsigned long)task_stack_page(p);
14165 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14166 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14167 return 0;
14168 fp = *(u64 *)(p->thread.sp);
14169 do {
14170 - if (fp < (unsigned long)stack ||
14171 - fp >= (unsigned long)stack+THREAD_SIZE)
14172 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14173 return 0;
14174 ip = *(u64 *)(fp+8);
14175 if (!in_sched_functions(ip))
14176 diff -urNp linux-3.0.4/arch/x86/kernel/process.c linux-3.0.4/arch/x86/kernel/process.c
14177 --- linux-3.0.4/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14178 +++ linux-3.0.4/arch/x86/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
14179 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14180
14181 void free_thread_info(struct thread_info *ti)
14182 {
14183 - free_thread_xstate(ti->task);
14184 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14185 }
14186
14187 +static struct kmem_cache *task_struct_cachep;
14188 +
14189 void arch_task_cache_init(void)
14190 {
14191 - task_xstate_cachep =
14192 - kmem_cache_create("task_xstate", xstate_size,
14193 + /* create a slab on which task_structs can be allocated */
14194 + task_struct_cachep =
14195 + kmem_cache_create("task_struct", sizeof(struct task_struct),
14196 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14197 +
14198 + task_xstate_cachep =
14199 + kmem_cache_create("task_xstate", xstate_size,
14200 __alignof__(union thread_xstate),
14201 - SLAB_PANIC | SLAB_NOTRACK, NULL);
14202 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14203 +}
14204 +
14205 +struct task_struct *alloc_task_struct_node(int node)
14206 +{
14207 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14208 +}
14209 +
14210 +void free_task_struct(struct task_struct *task)
14211 +{
14212 + free_thread_xstate(task);
14213 + kmem_cache_free(task_struct_cachep, task);
14214 }
14215
14216 /*
14217 @@ -70,7 +87,7 @@ void exit_thread(void)
14218 unsigned long *bp = t->io_bitmap_ptr;
14219
14220 if (bp) {
14221 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14222 + struct tss_struct *tss = init_tss + get_cpu();
14223
14224 t->io_bitmap_ptr = NULL;
14225 clear_thread_flag(TIF_IO_BITMAP);
14226 @@ -106,7 +123,7 @@ void show_regs_common(void)
14227
14228 printk(KERN_CONT "\n");
14229 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14230 - current->pid, current->comm, print_tainted(),
14231 + task_pid_nr(current), current->comm, print_tainted(),
14232 init_utsname()->release,
14233 (int)strcspn(init_utsname()->version, " "),
14234 init_utsname()->version);
14235 @@ -120,6 +137,9 @@ void flush_thread(void)
14236 {
14237 struct task_struct *tsk = current;
14238
14239 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14240 + loadsegment(gs, 0);
14241 +#endif
14242 flush_ptrace_hw_breakpoint(tsk);
14243 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14244 /*
14245 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14246 regs.di = (unsigned long) arg;
14247
14248 #ifdef CONFIG_X86_32
14249 - regs.ds = __USER_DS;
14250 - regs.es = __USER_DS;
14251 + regs.ds = __KERNEL_DS;
14252 + regs.es = __KERNEL_DS;
14253 regs.fs = __KERNEL_PERCPU;
14254 - regs.gs = __KERNEL_STACK_CANARY;
14255 + savesegment(gs, regs.gs);
14256 #else
14257 regs.ss = __KERNEL_DS;
14258 #endif
14259 @@ -403,7 +423,7 @@ void default_idle(void)
14260 EXPORT_SYMBOL(default_idle);
14261 #endif
14262
14263 -void stop_this_cpu(void *dummy)
14264 +__noreturn void stop_this_cpu(void *dummy)
14265 {
14266 local_irq_disable();
14267 /*
14268 @@ -668,16 +688,34 @@ static int __init idle_setup(char *str)
14269 }
14270 early_param("idle", idle_setup);
14271
14272 -unsigned long arch_align_stack(unsigned long sp)
14273 +#ifdef CONFIG_PAX_RANDKSTACK
14274 +asmlinkage void pax_randomize_kstack(void)
14275 {
14276 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14277 - sp -= get_random_int() % 8192;
14278 - return sp & ~0xf;
14279 -}
14280 + struct thread_struct *thread = &current->thread;
14281 + unsigned long time;
14282
14283 -unsigned long arch_randomize_brk(struct mm_struct *mm)
14284 -{
14285 - unsigned long range_end = mm->brk + 0x02000000;
14286 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14287 -}
14288 + if (!randomize_va_space)
14289 + return;
14290 +
14291 + rdtscl(time);
14292 +
14293 + /* P4 seems to return a 0 LSB, ignore it */
14294 +#ifdef CONFIG_MPENTIUM4
14295 + time &= 0x3EUL;
14296 + time <<= 2;
14297 +#elif defined(CONFIG_X86_64)
14298 + time &= 0xFUL;
14299 + time <<= 4;
14300 +#else
14301 + time &= 0x1FUL;
14302 + time <<= 3;
14303 +#endif
14304 +
14305 + thread->sp0 ^= time;
14306 + load_sp0(init_tss + smp_processor_id(), thread);
14307
14308 +#ifdef CONFIG_X86_64
14309 + percpu_write(kernel_stack, thread->sp0);
14310 +#endif
14311 +}
14312 +#endif
14313 diff -urNp linux-3.0.4/arch/x86/kernel/ptrace.c linux-3.0.4/arch/x86/kernel/ptrace.c
14314 --- linux-3.0.4/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14315 +++ linux-3.0.4/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14316 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14317 unsigned long addr, unsigned long data)
14318 {
14319 int ret;
14320 - unsigned long __user *datap = (unsigned long __user *)data;
14321 + unsigned long __user *datap = (__force unsigned long __user *)data;
14322
14323 switch (request) {
14324 /* read the word at location addr in the USER area. */
14325 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14326 if ((int) addr < 0)
14327 return -EIO;
14328 ret = do_get_thread_area(child, addr,
14329 - (struct user_desc __user *)data);
14330 + (__force struct user_desc __user *) data);
14331 break;
14332
14333 case PTRACE_SET_THREAD_AREA:
14334 if ((int) addr < 0)
14335 return -EIO;
14336 ret = do_set_thread_area(child, addr,
14337 - (struct user_desc __user *)data, 0);
14338 + (__force struct user_desc __user *) data, 0);
14339 break;
14340 #endif
14341
14342 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14343 memset(info, 0, sizeof(*info));
14344 info->si_signo = SIGTRAP;
14345 info->si_code = si_code;
14346 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14347 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14348 }
14349
14350 void user_single_step_siginfo(struct task_struct *tsk,
14351 diff -urNp linux-3.0.4/arch/x86/kernel/pvclock.c linux-3.0.4/arch/x86/kernel/pvclock.c
14352 --- linux-3.0.4/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14353 +++ linux-3.0.4/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14354 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14355 return pv_tsc_khz;
14356 }
14357
14358 -static atomic64_t last_value = ATOMIC64_INIT(0);
14359 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14360
14361 void pvclock_resume(void)
14362 {
14363 - atomic64_set(&last_value, 0);
14364 + atomic64_set_unchecked(&last_value, 0);
14365 }
14366
14367 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14368 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14369 * updating at the same time, and one of them could be slightly behind,
14370 * making the assumption that last_value always go forward fail to hold.
14371 */
14372 - last = atomic64_read(&last_value);
14373 + last = atomic64_read_unchecked(&last_value);
14374 do {
14375 if (ret < last)
14376 return last;
14377 - last = atomic64_cmpxchg(&last_value, last, ret);
14378 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14379 } while (unlikely(last != ret));
14380
14381 return ret;
14382 diff -urNp linux-3.0.4/arch/x86/kernel/reboot.c linux-3.0.4/arch/x86/kernel/reboot.c
14383 --- linux-3.0.4/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14384 +++ linux-3.0.4/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14385 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14386 EXPORT_SYMBOL(pm_power_off);
14387
14388 static const struct desc_ptr no_idt = {};
14389 -static int reboot_mode;
14390 +static unsigned short reboot_mode;
14391 enum reboot_type reboot_type = BOOT_ACPI;
14392 int reboot_force;
14393
14394 @@ -315,13 +315,17 @@ core_initcall(reboot_init);
14395 extern const unsigned char machine_real_restart_asm[];
14396 extern const u64 machine_real_restart_gdt[3];
14397
14398 -void machine_real_restart(unsigned int type)
14399 +__noreturn void machine_real_restart(unsigned int type)
14400 {
14401 void *restart_va;
14402 unsigned long restart_pa;
14403 - void (*restart_lowmem)(unsigned int);
14404 + void (* __noreturn restart_lowmem)(unsigned int);
14405 u64 *lowmem_gdt;
14406
14407 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14408 + struct desc_struct *gdt;
14409 +#endif
14410 +
14411 local_irq_disable();
14412
14413 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14414 @@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14415 boot)". This seems like a fairly standard thing that gets set by
14416 REBOOT.COM programs, and the previous reset routine did this
14417 too. */
14418 - *((unsigned short *)0x472) = reboot_mode;
14419 + *(unsigned short *)(__va(0x472)) = reboot_mode;
14420
14421 /* Patch the GDT in the low memory trampoline */
14422 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14423
14424 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14425 restart_pa = virt_to_phys(restart_va);
14426 - restart_lowmem = (void (*)(unsigned int))restart_pa;
14427 + restart_lowmem = (void *)restart_pa;
14428
14429 /* GDT[0]: GDT self-pointer */
14430 lowmem_gdt[0] =
14431 @@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14432 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14433
14434 /* Jump to the identity-mapped low memory code */
14435 +
14436 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14437 + gdt = get_cpu_gdt_table(smp_processor_id());
14438 + pax_open_kernel();
14439 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14440 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14441 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14442 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14443 +#endif
14444 +#ifdef CONFIG_PAX_KERNEXEC
14445 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14446 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14447 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14448 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14449 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14450 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14451 +#endif
14452 + pax_close_kernel();
14453 +#endif
14454 +
14455 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14456 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14457 + unreachable();
14458 +#else
14459 restart_lowmem(type);
14460 +#endif
14461 +
14462 }
14463 #ifdef CONFIG_APM_MODULE
14464 EXPORT_SYMBOL(machine_real_restart);
14465 @@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14466 * try to force a triple fault and then cycle between hitting the keyboard
14467 * controller and doing that
14468 */
14469 -static void native_machine_emergency_restart(void)
14470 +__noreturn static void native_machine_emergency_restart(void)
14471 {
14472 int i;
14473 int attempt = 0;
14474 @@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14475 #endif
14476 }
14477
14478 -static void __machine_emergency_restart(int emergency)
14479 +static __noreturn void __machine_emergency_restart(int emergency)
14480 {
14481 reboot_emergency = emergency;
14482 machine_ops.emergency_restart();
14483 }
14484
14485 -static void native_machine_restart(char *__unused)
14486 +static __noreturn void native_machine_restart(char *__unused)
14487 {
14488 printk("machine restart\n");
14489
14490 @@ -662,7 +692,7 @@ static void native_machine_restart(char
14491 __machine_emergency_restart(0);
14492 }
14493
14494 -static void native_machine_halt(void)
14495 +static __noreturn void native_machine_halt(void)
14496 {
14497 /* stop other cpus and apics */
14498 machine_shutdown();
14499 @@ -673,7 +703,7 @@ static void native_machine_halt(void)
14500 stop_this_cpu(NULL);
14501 }
14502
14503 -static void native_machine_power_off(void)
14504 +__noreturn static void native_machine_power_off(void)
14505 {
14506 if (pm_power_off) {
14507 if (!reboot_force)
14508 @@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14509 }
14510 /* a fallback in case there is no PM info available */
14511 tboot_shutdown(TB_SHUTDOWN_HALT);
14512 + unreachable();
14513 }
14514
14515 struct machine_ops machine_ops = {
14516 diff -urNp linux-3.0.4/arch/x86/kernel/setup.c linux-3.0.4/arch/x86/kernel/setup.c
14517 --- linux-3.0.4/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14518 +++ linux-3.0.4/arch/x86/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
14519 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14520 * area (640->1Mb) as ram even though it is not.
14521 * take them out.
14522 */
14523 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14524 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14525 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14526 }
14527
14528 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14529
14530 if (!boot_params.hdr.root_flags)
14531 root_mountflags &= ~MS_RDONLY;
14532 - init_mm.start_code = (unsigned long) _text;
14533 - init_mm.end_code = (unsigned long) _etext;
14534 + init_mm.start_code = ktla_ktva((unsigned long) _text);
14535 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
14536 init_mm.end_data = (unsigned long) _edata;
14537 init_mm.brk = _brk_end;
14538
14539 - code_resource.start = virt_to_phys(_text);
14540 - code_resource.end = virt_to_phys(_etext)-1;
14541 - data_resource.start = virt_to_phys(_etext);
14542 + code_resource.start = virt_to_phys(ktla_ktva(_text));
14543 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14544 + data_resource.start = virt_to_phys(_sdata);
14545 data_resource.end = virt_to_phys(_edata)-1;
14546 bss_resource.start = virt_to_phys(&__bss_start);
14547 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14548 diff -urNp linux-3.0.4/arch/x86/kernel/setup_percpu.c linux-3.0.4/arch/x86/kernel/setup_percpu.c
14549 --- linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14550 +++ linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14551 @@ -21,19 +21,17 @@
14552 #include <asm/cpu.h>
14553 #include <asm/stackprotector.h>
14554
14555 -DEFINE_PER_CPU(int, cpu_number);
14556 +#ifdef CONFIG_SMP
14557 +DEFINE_PER_CPU(unsigned int, cpu_number);
14558 EXPORT_PER_CPU_SYMBOL(cpu_number);
14559 +#endif
14560
14561 -#ifdef CONFIG_X86_64
14562 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14563 -#else
14564 -#define BOOT_PERCPU_OFFSET 0
14565 -#endif
14566
14567 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14568 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14569
14570 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14571 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14572 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14573 };
14574 EXPORT_SYMBOL(__per_cpu_offset);
14575 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14576 {
14577 #ifdef CONFIG_X86_32
14578 struct desc_struct gdt;
14579 + unsigned long base = per_cpu_offset(cpu);
14580
14581 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14582 - 0x2 | DESCTYPE_S, 0x8);
14583 - gdt.s = 1;
14584 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14585 + 0x83 | DESCTYPE_S, 0xC);
14586 write_gdt_entry(get_cpu_gdt_table(cpu),
14587 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14588 #endif
14589 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14590 /* alrighty, percpu areas up and running */
14591 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14592 for_each_possible_cpu(cpu) {
14593 +#ifdef CONFIG_CC_STACKPROTECTOR
14594 +#ifdef CONFIG_X86_32
14595 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
14596 +#endif
14597 +#endif
14598 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14599 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14600 per_cpu(cpu_number, cpu) = cpu;
14601 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14602 */
14603 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14604 #endif
14605 +#ifdef CONFIG_CC_STACKPROTECTOR
14606 +#ifdef CONFIG_X86_32
14607 + if (!cpu)
14608 + per_cpu(stack_canary.canary, cpu) = canary;
14609 +#endif
14610 +#endif
14611 /*
14612 * Up to this point, the boot CPU has been using .init.data
14613 * area. Reload any changed state for the boot CPU.
14614 diff -urNp linux-3.0.4/arch/x86/kernel/signal.c linux-3.0.4/arch/x86/kernel/signal.c
14615 --- linux-3.0.4/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
14616 +++ linux-3.0.4/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
14617 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14618 * Align the stack pointer according to the i386 ABI,
14619 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14620 */
14621 - sp = ((sp + 4) & -16ul) - 4;
14622 + sp = ((sp - 12) & -16ul) - 4;
14623 #else /* !CONFIG_X86_32 */
14624 sp = round_down(sp, 16) - 8;
14625 #endif
14626 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14627 * Return an always-bogus address instead so we will die with SIGSEGV.
14628 */
14629 if (onsigstack && !likely(on_sig_stack(sp)))
14630 - return (void __user *)-1L;
14631 + return (__force void __user *)-1L;
14632
14633 /* save i387 state */
14634 if (used_math() && save_i387_xstate(*fpstate) < 0)
14635 - return (void __user *)-1L;
14636 + return (__force void __user *)-1L;
14637
14638 return (void __user *)sp;
14639 }
14640 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14641 }
14642
14643 if (current->mm->context.vdso)
14644 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14645 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14646 else
14647 - restorer = &frame->retcode;
14648 + restorer = (void __user *)&frame->retcode;
14649 if (ka->sa.sa_flags & SA_RESTORER)
14650 restorer = ka->sa.sa_restorer;
14651
14652 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14653 * reasons and because gdb uses it as a signature to notice
14654 * signal handler stack frames.
14655 */
14656 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14657 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14658
14659 if (err)
14660 return -EFAULT;
14661 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14662 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14663
14664 /* Set up to return from userspace. */
14665 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14666 + if (current->mm->context.vdso)
14667 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14668 + else
14669 + restorer = (void __user *)&frame->retcode;
14670 if (ka->sa.sa_flags & SA_RESTORER)
14671 restorer = ka->sa.sa_restorer;
14672 put_user_ex(restorer, &frame->pretcode);
14673 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14674 * reasons and because gdb uses it as a signature to notice
14675 * signal handler stack frames.
14676 */
14677 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14678 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14679 } put_user_catch(err);
14680
14681 if (err)
14682 @@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
14683 int signr;
14684 sigset_t *oldset;
14685
14686 + pax_track_stack();
14687 +
14688 /*
14689 * We want the common case to go fast, which is why we may in certain
14690 * cases get here from kernel mode. Just return without doing anything
14691 @@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
14692 * X86_32: vm86 regs switched out by assembly code before reaching
14693 * here, so testing against kernel CS suffices.
14694 */
14695 - if (!user_mode(regs))
14696 + if (!user_mode_novm(regs))
14697 return;
14698
14699 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14700 diff -urNp linux-3.0.4/arch/x86/kernel/smpboot.c linux-3.0.4/arch/x86/kernel/smpboot.c
14701 --- linux-3.0.4/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
14702 +++ linux-3.0.4/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
14703 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14704 set_idle_for_cpu(cpu, c_idle.idle);
14705 do_rest:
14706 per_cpu(current_task, cpu) = c_idle.idle;
14707 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14708 #ifdef CONFIG_X86_32
14709 /* Stack for startup_32 can be just as for start_secondary onwards */
14710 irq_ctx_init(cpu);
14711 #else
14712 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14713 initial_gs = per_cpu_offset(cpu);
14714 - per_cpu(kernel_stack, cpu) =
14715 - (unsigned long)task_stack_page(c_idle.idle) -
14716 - KERNEL_STACK_OFFSET + THREAD_SIZE;
14717 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14718 #endif
14719 +
14720 + pax_open_kernel();
14721 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14722 + pax_close_kernel();
14723 +
14724 initial_code = (unsigned long)start_secondary;
14725 stack_start = c_idle.idle->thread.sp;
14726
14727 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14728
14729 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14730
14731 +#ifdef CONFIG_PAX_PER_CPU_PGD
14732 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14733 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14734 + KERNEL_PGD_PTRS);
14735 +#endif
14736 +
14737 err = do_boot_cpu(apicid, cpu);
14738 if (err) {
14739 pr_debug("do_boot_cpu failed %d\n", err);
14740 diff -urNp linux-3.0.4/arch/x86/kernel/step.c linux-3.0.4/arch/x86/kernel/step.c
14741 --- linux-3.0.4/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
14742 +++ linux-3.0.4/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
14743 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14744 struct desc_struct *desc;
14745 unsigned long base;
14746
14747 - seg &= ~7UL;
14748 + seg >>= 3;
14749
14750 mutex_lock(&child->mm->context.lock);
14751 - if (unlikely((seg >> 3) >= child->mm->context.size))
14752 + if (unlikely(seg >= child->mm->context.size))
14753 addr = -1L; /* bogus selector, access would fault */
14754 else {
14755 desc = child->mm->context.ldt + seg;
14756 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14757 addr += base;
14758 }
14759 mutex_unlock(&child->mm->context.lock);
14760 - }
14761 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14762 + addr = ktla_ktva(addr);
14763
14764 return addr;
14765 }
14766 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14767 unsigned char opcode[15];
14768 unsigned long addr = convert_ip_to_linear(child, regs);
14769
14770 + if (addr == -EINVAL)
14771 + return 0;
14772 +
14773 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14774 for (i = 0; i < copied; i++) {
14775 switch (opcode[i]) {
14776 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14777
14778 #ifdef CONFIG_X86_64
14779 case 0x40 ... 0x4f:
14780 - if (regs->cs != __USER_CS)
14781 + if ((regs->cs & 0xffff) != __USER_CS)
14782 /* 32-bit mode: register increment */
14783 return 0;
14784 /* 64-bit mode: REX prefix */
14785 diff -urNp linux-3.0.4/arch/x86/kernel/syscall_table_32.S linux-3.0.4/arch/x86/kernel/syscall_table_32.S
14786 --- linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
14787 +++ linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
14788 @@ -1,3 +1,4 @@
14789 +.section .rodata,"a",@progbits
14790 ENTRY(sys_call_table)
14791 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14792 .long sys_exit
14793 diff -urNp linux-3.0.4/arch/x86/kernel/sys_i386_32.c linux-3.0.4/arch/x86/kernel/sys_i386_32.c
14794 --- linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
14795 +++ linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
14796 @@ -24,17 +24,224 @@
14797
14798 #include <asm/syscalls.h>
14799
14800 -/*
14801 - * Do a system call from kernel instead of calling sys_execve so we
14802 - * end up with proper pt_regs.
14803 - */
14804 -int kernel_execve(const char *filename,
14805 - const char *const argv[],
14806 - const char *const envp[])
14807 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14808 {
14809 - long __res;
14810 - asm volatile ("int $0x80"
14811 - : "=a" (__res)
14812 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14813 - return __res;
14814 + unsigned long pax_task_size = TASK_SIZE;
14815 +
14816 +#ifdef CONFIG_PAX_SEGMEXEC
14817 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14818 + pax_task_size = SEGMEXEC_TASK_SIZE;
14819 +#endif
14820 +
14821 + if (len > pax_task_size || addr > pax_task_size - len)
14822 + return -EINVAL;
14823 +
14824 + return 0;
14825 +}
14826 +
14827 +unsigned long
14828 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
14829 + unsigned long len, unsigned long pgoff, unsigned long flags)
14830 +{
14831 + struct mm_struct *mm = current->mm;
14832 + struct vm_area_struct *vma;
14833 + unsigned long start_addr, pax_task_size = TASK_SIZE;
14834 +
14835 +#ifdef CONFIG_PAX_SEGMEXEC
14836 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14837 + pax_task_size = SEGMEXEC_TASK_SIZE;
14838 +#endif
14839 +
14840 + pax_task_size -= PAGE_SIZE;
14841 +
14842 + if (len > pax_task_size)
14843 + return -ENOMEM;
14844 +
14845 + if (flags & MAP_FIXED)
14846 + return addr;
14847 +
14848 +#ifdef CONFIG_PAX_RANDMMAP
14849 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14850 +#endif
14851 +
14852 + if (addr) {
14853 + addr = PAGE_ALIGN(addr);
14854 + if (pax_task_size - len >= addr) {
14855 + vma = find_vma(mm, addr);
14856 + if (check_heap_stack_gap(vma, addr, len))
14857 + return addr;
14858 + }
14859 + }
14860 + if (len > mm->cached_hole_size) {
14861 + start_addr = addr = mm->free_area_cache;
14862 + } else {
14863 + start_addr = addr = mm->mmap_base;
14864 + mm->cached_hole_size = 0;
14865 + }
14866 +
14867 +#ifdef CONFIG_PAX_PAGEEXEC
14868 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14869 + start_addr = 0x00110000UL;
14870 +
14871 +#ifdef CONFIG_PAX_RANDMMAP
14872 + if (mm->pax_flags & MF_PAX_RANDMMAP)
14873 + start_addr += mm->delta_mmap & 0x03FFF000UL;
14874 +#endif
14875 +
14876 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14877 + start_addr = addr = mm->mmap_base;
14878 + else
14879 + addr = start_addr;
14880 + }
14881 +#endif
14882 +
14883 +full_search:
14884 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14885 + /* At this point: (!vma || addr < vma->vm_end). */
14886 + if (pax_task_size - len < addr) {
14887 + /*
14888 + * Start a new search - just in case we missed
14889 + * some holes.
14890 + */
14891 + if (start_addr != mm->mmap_base) {
14892 + start_addr = addr = mm->mmap_base;
14893 + mm->cached_hole_size = 0;
14894 + goto full_search;
14895 + }
14896 + return -ENOMEM;
14897 + }
14898 + if (check_heap_stack_gap(vma, addr, len))
14899 + break;
14900 + if (addr + mm->cached_hole_size < vma->vm_start)
14901 + mm->cached_hole_size = vma->vm_start - addr;
14902 + addr = vma->vm_end;
14903 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
14904 + start_addr = addr = mm->mmap_base;
14905 + mm->cached_hole_size = 0;
14906 + goto full_search;
14907 + }
14908 + }
14909 +
14910 + /*
14911 + * Remember the place where we stopped the search:
14912 + */
14913 + mm->free_area_cache = addr + len;
14914 + return addr;
14915 +}
14916 +
14917 +unsigned long
14918 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14919 + const unsigned long len, const unsigned long pgoff,
14920 + const unsigned long flags)
14921 +{
14922 + struct vm_area_struct *vma;
14923 + struct mm_struct *mm = current->mm;
14924 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14925 +
14926 +#ifdef CONFIG_PAX_SEGMEXEC
14927 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14928 + pax_task_size = SEGMEXEC_TASK_SIZE;
14929 +#endif
14930 +
14931 + pax_task_size -= PAGE_SIZE;
14932 +
14933 + /* requested length too big for entire address space */
14934 + if (len > pax_task_size)
14935 + return -ENOMEM;
14936 +
14937 + if (flags & MAP_FIXED)
14938 + return addr;
14939 +
14940 +#ifdef CONFIG_PAX_PAGEEXEC
14941 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14942 + goto bottomup;
14943 +#endif
14944 +
14945 +#ifdef CONFIG_PAX_RANDMMAP
14946 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14947 +#endif
14948 +
14949 + /* requesting a specific address */
14950 + if (addr) {
14951 + addr = PAGE_ALIGN(addr);
14952 + if (pax_task_size - len >= addr) {
14953 + vma = find_vma(mm, addr);
14954 + if (check_heap_stack_gap(vma, addr, len))
14955 + return addr;
14956 + }
14957 + }
14958 +
14959 + /* check if free_area_cache is useful for us */
14960 + if (len <= mm->cached_hole_size) {
14961 + mm->cached_hole_size = 0;
14962 + mm->free_area_cache = mm->mmap_base;
14963 + }
14964 +
14965 + /* either no address requested or can't fit in requested address hole */
14966 + addr = mm->free_area_cache;
14967 +
14968 + /* make sure it can fit in the remaining address space */
14969 + if (addr > len) {
14970 + vma = find_vma(mm, addr-len);
14971 + if (check_heap_stack_gap(vma, addr - len, len))
14972 + /* remember the address as a hint for next time */
14973 + return (mm->free_area_cache = addr-len);
14974 + }
14975 +
14976 + if (mm->mmap_base < len)
14977 + goto bottomup;
14978 +
14979 + addr = mm->mmap_base-len;
14980 +
14981 + do {
14982 + /*
14983 + * Lookup failure means no vma is above this address,
14984 + * else if new region fits below vma->vm_start,
14985 + * return with success:
14986 + */
14987 + vma = find_vma(mm, addr);
14988 + if (check_heap_stack_gap(vma, addr, len))
14989 + /* remember the address as a hint for next time */
14990 + return (mm->free_area_cache = addr);
14991 +
14992 + /* remember the largest hole we saw so far */
14993 + if (addr + mm->cached_hole_size < vma->vm_start)
14994 + mm->cached_hole_size = vma->vm_start - addr;
14995 +
14996 + /* try just below the current vma->vm_start */
14997 + addr = skip_heap_stack_gap(vma, len);
14998 + } while (!IS_ERR_VALUE(addr));
14999 +
15000 +bottomup:
15001 + /*
15002 + * A failed mmap() very likely causes application failure,
15003 + * so fall back to the bottom-up function here. This scenario
15004 + * can happen with large stack limits and large mmap()
15005 + * allocations.
15006 + */
15007 +
15008 +#ifdef CONFIG_PAX_SEGMEXEC
15009 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15010 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15011 + else
15012 +#endif
15013 +
15014 + mm->mmap_base = TASK_UNMAPPED_BASE;
15015 +
15016 +#ifdef CONFIG_PAX_RANDMMAP
15017 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15018 + mm->mmap_base += mm->delta_mmap;
15019 +#endif
15020 +
15021 + mm->free_area_cache = mm->mmap_base;
15022 + mm->cached_hole_size = ~0UL;
15023 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15024 + /*
15025 + * Restore the topdown base:
15026 + */
15027 + mm->mmap_base = base;
15028 + mm->free_area_cache = base;
15029 + mm->cached_hole_size = ~0UL;
15030 +
15031 + return addr;
15032 }
15033 diff -urNp linux-3.0.4/arch/x86/kernel/sys_x86_64.c linux-3.0.4/arch/x86/kernel/sys_x86_64.c
15034 --- linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15035 +++ linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15036 @@ -32,8 +32,8 @@ out:
15037 return error;
15038 }
15039
15040 -static void find_start_end(unsigned long flags, unsigned long *begin,
15041 - unsigned long *end)
15042 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15043 + unsigned long *begin, unsigned long *end)
15044 {
15045 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15046 unsigned long new_begin;
15047 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15048 *begin = new_begin;
15049 }
15050 } else {
15051 - *begin = TASK_UNMAPPED_BASE;
15052 + *begin = mm->mmap_base;
15053 *end = TASK_SIZE;
15054 }
15055 }
15056 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15057 if (flags & MAP_FIXED)
15058 return addr;
15059
15060 - find_start_end(flags, &begin, &end);
15061 + find_start_end(mm, flags, &begin, &end);
15062
15063 if (len > end)
15064 return -ENOMEM;
15065
15066 +#ifdef CONFIG_PAX_RANDMMAP
15067 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15068 +#endif
15069 +
15070 if (addr) {
15071 addr = PAGE_ALIGN(addr);
15072 vma = find_vma(mm, addr);
15073 - if (end - len >= addr &&
15074 - (!vma || addr + len <= vma->vm_start))
15075 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15076 return addr;
15077 }
15078 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15079 @@ -106,7 +109,7 @@ full_search:
15080 }
15081 return -ENOMEM;
15082 }
15083 - if (!vma || addr + len <= vma->vm_start) {
15084 + if (check_heap_stack_gap(vma, addr, len)) {
15085 /*
15086 * Remember the place where we stopped the search:
15087 */
15088 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15089 {
15090 struct vm_area_struct *vma;
15091 struct mm_struct *mm = current->mm;
15092 - unsigned long addr = addr0;
15093 + unsigned long base = mm->mmap_base, addr = addr0;
15094
15095 /* requested length too big for entire address space */
15096 if (len > TASK_SIZE)
15097 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15098 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15099 goto bottomup;
15100
15101 +#ifdef CONFIG_PAX_RANDMMAP
15102 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15103 +#endif
15104 +
15105 /* requesting a specific address */
15106 if (addr) {
15107 addr = PAGE_ALIGN(addr);
15108 - vma = find_vma(mm, addr);
15109 - if (TASK_SIZE - len >= addr &&
15110 - (!vma || addr + len <= vma->vm_start))
15111 - return addr;
15112 + if (TASK_SIZE - len >= addr) {
15113 + vma = find_vma(mm, addr);
15114 + if (check_heap_stack_gap(vma, addr, len))
15115 + return addr;
15116 + }
15117 }
15118
15119 /* check if free_area_cache is useful for us */
15120 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15121 /* make sure it can fit in the remaining address space */
15122 if (addr > len) {
15123 vma = find_vma(mm, addr-len);
15124 - if (!vma || addr <= vma->vm_start)
15125 + if (check_heap_stack_gap(vma, addr - len, len))
15126 /* remember the address as a hint for next time */
15127 return mm->free_area_cache = addr-len;
15128 }
15129 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15130 * return with success:
15131 */
15132 vma = find_vma(mm, addr);
15133 - if (!vma || addr+len <= vma->vm_start)
15134 + if (check_heap_stack_gap(vma, addr, len))
15135 /* remember the address as a hint for next time */
15136 return mm->free_area_cache = addr;
15137
15138 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15139 mm->cached_hole_size = vma->vm_start - addr;
15140
15141 /* try just below the current vma->vm_start */
15142 - addr = vma->vm_start-len;
15143 - } while (len < vma->vm_start);
15144 + addr = skip_heap_stack_gap(vma, len);
15145 + } while (!IS_ERR_VALUE(addr));
15146
15147 bottomup:
15148 /*
15149 @@ -198,13 +206,21 @@ bottomup:
15150 * can happen with large stack limits and large mmap()
15151 * allocations.
15152 */
15153 + mm->mmap_base = TASK_UNMAPPED_BASE;
15154 +
15155 +#ifdef CONFIG_PAX_RANDMMAP
15156 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15157 + mm->mmap_base += mm->delta_mmap;
15158 +#endif
15159 +
15160 + mm->free_area_cache = mm->mmap_base;
15161 mm->cached_hole_size = ~0UL;
15162 - mm->free_area_cache = TASK_UNMAPPED_BASE;
15163 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15164 /*
15165 * Restore the topdown base:
15166 */
15167 - mm->free_area_cache = mm->mmap_base;
15168 + mm->mmap_base = base;
15169 + mm->free_area_cache = base;
15170 mm->cached_hole_size = ~0UL;
15171
15172 return addr;
15173 diff -urNp linux-3.0.4/arch/x86/kernel/tboot.c linux-3.0.4/arch/x86/kernel/tboot.c
15174 --- linux-3.0.4/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15175 +++ linux-3.0.4/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15176 @@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15177
15178 void tboot_shutdown(u32 shutdown_type)
15179 {
15180 - void (*shutdown)(void);
15181 + void (* __noreturn shutdown)(void);
15182
15183 if (!tboot_enabled())
15184 return;
15185 @@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15186
15187 switch_to_tboot_pt();
15188
15189 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15190 + shutdown = (void *)tboot->shutdown_entry;
15191 shutdown();
15192
15193 /* should not reach here */
15194 @@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15195 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15196 }
15197
15198 -static atomic_t ap_wfs_count;
15199 +static atomic_unchecked_t ap_wfs_count;
15200
15201 static int tboot_wait_for_aps(int num_aps)
15202 {
15203 @@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15204 {
15205 switch (action) {
15206 case CPU_DYING:
15207 - atomic_inc(&ap_wfs_count);
15208 + atomic_inc_unchecked(&ap_wfs_count);
15209 if (num_online_cpus() == 1)
15210 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15211 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15212 return NOTIFY_BAD;
15213 break;
15214 }
15215 @@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15216
15217 tboot_create_trampoline();
15218
15219 - atomic_set(&ap_wfs_count, 0);
15220 + atomic_set_unchecked(&ap_wfs_count, 0);
15221 register_hotcpu_notifier(&tboot_cpu_notifier);
15222 return 0;
15223 }
15224 diff -urNp linux-3.0.4/arch/x86/kernel/time.c linux-3.0.4/arch/x86/kernel/time.c
15225 --- linux-3.0.4/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15226 +++ linux-3.0.4/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15227 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15228 {
15229 unsigned long pc = instruction_pointer(regs);
15230
15231 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15232 + if (!user_mode(regs) && in_lock_functions(pc)) {
15233 #ifdef CONFIG_FRAME_POINTER
15234 - return *(unsigned long *)(regs->bp + sizeof(long));
15235 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15236 #else
15237 unsigned long *sp =
15238 (unsigned long *)kernel_stack_pointer(regs);
15239 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15240 * or above a saved flags. Eflags has bits 22-31 zero,
15241 * kernel addresses don't.
15242 */
15243 +
15244 +#ifdef CONFIG_PAX_KERNEXEC
15245 + return ktla_ktva(sp[0]);
15246 +#else
15247 if (sp[0] >> 22)
15248 return sp[0];
15249 if (sp[1] >> 22)
15250 return sp[1];
15251 #endif
15252 +
15253 +#endif
15254 }
15255 return pc;
15256 }
15257 diff -urNp linux-3.0.4/arch/x86/kernel/tls.c linux-3.0.4/arch/x86/kernel/tls.c
15258 --- linux-3.0.4/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15259 +++ linux-3.0.4/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15260 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15261 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15262 return -EINVAL;
15263
15264 +#ifdef CONFIG_PAX_SEGMEXEC
15265 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15266 + return -EINVAL;
15267 +#endif
15268 +
15269 set_tls_desc(p, idx, &info, 1);
15270
15271 return 0;
15272 diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_32.S linux-3.0.4/arch/x86/kernel/trampoline_32.S
15273 --- linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15274 +++ linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15275 @@ -32,6 +32,12 @@
15276 #include <asm/segment.h>
15277 #include <asm/page_types.h>
15278
15279 +#ifdef CONFIG_PAX_KERNEXEC
15280 +#define ta(X) (X)
15281 +#else
15282 +#define ta(X) ((X) - __PAGE_OFFSET)
15283 +#endif
15284 +
15285 #ifdef CONFIG_SMP
15286
15287 .section ".x86_trampoline","a"
15288 @@ -62,7 +68,7 @@ r_base = .
15289 inc %ax # protected mode (PE) bit
15290 lmsw %ax # into protected mode
15291 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15292 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15293 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
15294
15295 # These need to be in the same 64K segment as the above;
15296 # hence we don't use the boot_gdt_descr defined in head.S
15297 diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_64.S linux-3.0.4/arch/x86/kernel/trampoline_64.S
15298 --- linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15299 +++ linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15300 @@ -90,7 +90,7 @@ startup_32:
15301 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15302 movl %eax, %ds
15303
15304 - movl $X86_CR4_PAE, %eax
15305 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15306 movl %eax, %cr4 # Enable PAE mode
15307
15308 # Setup trampoline 4 level pagetables
15309 @@ -138,7 +138,7 @@ tidt:
15310 # so the kernel can live anywhere
15311 .balign 4
15312 tgdt:
15313 - .short tgdt_end - tgdt # gdt limit
15314 + .short tgdt_end - tgdt - 1 # gdt limit
15315 .long tgdt - r_base
15316 .short 0
15317 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15318 diff -urNp linux-3.0.4/arch/x86/kernel/traps.c linux-3.0.4/arch/x86/kernel/traps.c
15319 --- linux-3.0.4/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15320 +++ linux-3.0.4/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15321 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15322
15323 /* Do we ignore FPU interrupts ? */
15324 char ignore_fpu_irq;
15325 -
15326 -/*
15327 - * The IDT has to be page-aligned to simplify the Pentium
15328 - * F0 0F bug workaround.
15329 - */
15330 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15331 #endif
15332
15333 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15334 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15335 }
15336
15337 static void __kprobes
15338 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15339 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15340 long error_code, siginfo_t *info)
15341 {
15342 struct task_struct *tsk = current;
15343
15344 #ifdef CONFIG_X86_32
15345 - if (regs->flags & X86_VM_MASK) {
15346 + if (v8086_mode(regs)) {
15347 /*
15348 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15349 * On nmi (interrupt 2), do_trap should not be called.
15350 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15351 }
15352 #endif
15353
15354 - if (!user_mode(regs))
15355 + if (!user_mode_novm(regs))
15356 goto kernel_trap;
15357
15358 #ifdef CONFIG_X86_32
15359 @@ -157,7 +151,7 @@ trap_signal:
15360 printk_ratelimit()) {
15361 printk(KERN_INFO
15362 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15363 - tsk->comm, tsk->pid, str,
15364 + tsk->comm, task_pid_nr(tsk), str,
15365 regs->ip, regs->sp, error_code);
15366 print_vma_addr(" in ", regs->ip);
15367 printk("\n");
15368 @@ -174,8 +168,20 @@ kernel_trap:
15369 if (!fixup_exception(regs)) {
15370 tsk->thread.error_code = error_code;
15371 tsk->thread.trap_no = trapnr;
15372 +
15373 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15374 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15375 + str = "PAX: suspicious stack segment fault";
15376 +#endif
15377 +
15378 die(str, regs, error_code);
15379 }
15380 +
15381 +#ifdef CONFIG_PAX_REFCOUNT
15382 + if (trapnr == 4)
15383 + pax_report_refcount_overflow(regs);
15384 +#endif
15385 +
15386 return;
15387
15388 #ifdef CONFIG_X86_32
15389 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15390 conditional_sti(regs);
15391
15392 #ifdef CONFIG_X86_32
15393 - if (regs->flags & X86_VM_MASK)
15394 + if (v8086_mode(regs))
15395 goto gp_in_vm86;
15396 #endif
15397
15398 tsk = current;
15399 - if (!user_mode(regs))
15400 + if (!user_mode_novm(regs))
15401 goto gp_in_kernel;
15402
15403 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15404 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15405 + struct mm_struct *mm = tsk->mm;
15406 + unsigned long limit;
15407 +
15408 + down_write(&mm->mmap_sem);
15409 + limit = mm->context.user_cs_limit;
15410 + if (limit < TASK_SIZE) {
15411 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15412 + up_write(&mm->mmap_sem);
15413 + return;
15414 + }
15415 + up_write(&mm->mmap_sem);
15416 + }
15417 +#endif
15418 +
15419 tsk->thread.error_code = error_code;
15420 tsk->thread.trap_no = 13;
15421
15422 @@ -304,6 +326,13 @@ gp_in_kernel:
15423 if (notify_die(DIE_GPF, "general protection fault", regs,
15424 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15425 return;
15426 +
15427 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15428 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15429 + die("PAX: suspicious general protection fault", regs, error_code);
15430 + else
15431 +#endif
15432 +
15433 die("general protection fault", regs, error_code);
15434 }
15435
15436 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15437 dotraplinkage notrace __kprobes void
15438 do_nmi(struct pt_regs *regs, long error_code)
15439 {
15440 +
15441 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15442 + if (!user_mode(regs)) {
15443 + unsigned long cs = regs->cs & 0xFFFF;
15444 + unsigned long ip = ktva_ktla(regs->ip);
15445 +
15446 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15447 + regs->ip = ip;
15448 + }
15449 +#endif
15450 +
15451 nmi_enter();
15452
15453 inc_irq_stat(__nmi_count);
15454 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15455 /* It's safe to allow irq's after DR6 has been saved */
15456 preempt_conditional_sti(regs);
15457
15458 - if (regs->flags & X86_VM_MASK) {
15459 + if (v8086_mode(regs)) {
15460 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15461 error_code, 1);
15462 preempt_conditional_cli(regs);
15463 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15464 * We already checked v86 mode above, so we can check for kernel mode
15465 * by just checking the CPL of CS.
15466 */
15467 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
15468 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15469 tsk->thread.debugreg6 &= ~DR_STEP;
15470 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15471 regs->flags &= ~X86_EFLAGS_TF;
15472 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15473 return;
15474 conditional_sti(regs);
15475
15476 - if (!user_mode_vm(regs))
15477 + if (!user_mode(regs))
15478 {
15479 if (!fixup_exception(regs)) {
15480 task->thread.error_code = error_code;
15481 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15482 void __math_state_restore(void)
15483 {
15484 struct thread_info *thread = current_thread_info();
15485 - struct task_struct *tsk = thread->task;
15486 + struct task_struct *tsk = current;
15487
15488 /*
15489 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15490 @@ -750,8 +790,7 @@ void __math_state_restore(void)
15491 */
15492 asmlinkage void math_state_restore(void)
15493 {
15494 - struct thread_info *thread = current_thread_info();
15495 - struct task_struct *tsk = thread->task;
15496 + struct task_struct *tsk = current;
15497
15498 if (!tsk_used_math(tsk)) {
15499 local_irq_enable();
15500 diff -urNp linux-3.0.4/arch/x86/kernel/verify_cpu.S linux-3.0.4/arch/x86/kernel/verify_cpu.S
15501 --- linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15502 +++ linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15503 @@ -20,6 +20,7 @@
15504 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15505 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15506 * arch/x86/kernel/head_32.S: processor startup
15507 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15508 *
15509 * verify_cpu, returns the status of longmode and SSE in register %eax.
15510 * 0: Success 1: Failure
15511 diff -urNp linux-3.0.4/arch/x86/kernel/vm86_32.c linux-3.0.4/arch/x86/kernel/vm86_32.c
15512 --- linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15513 +++ linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15514 @@ -41,6 +41,7 @@
15515 #include <linux/ptrace.h>
15516 #include <linux/audit.h>
15517 #include <linux/stddef.h>
15518 +#include <linux/grsecurity.h>
15519
15520 #include <asm/uaccess.h>
15521 #include <asm/io.h>
15522 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15523 do_exit(SIGSEGV);
15524 }
15525
15526 - tss = &per_cpu(init_tss, get_cpu());
15527 + tss = init_tss + get_cpu();
15528 current->thread.sp0 = current->thread.saved_sp0;
15529 current->thread.sysenter_cs = __KERNEL_CS;
15530 load_sp0(tss, &current->thread);
15531 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15532 struct task_struct *tsk;
15533 int tmp, ret = -EPERM;
15534
15535 +#ifdef CONFIG_GRKERNSEC_VM86
15536 + if (!capable(CAP_SYS_RAWIO)) {
15537 + gr_handle_vm86();
15538 + goto out;
15539 + }
15540 +#endif
15541 +
15542 tsk = current;
15543 if (tsk->thread.saved_sp0)
15544 goto out;
15545 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15546 int tmp, ret;
15547 struct vm86plus_struct __user *v86;
15548
15549 +#ifdef CONFIG_GRKERNSEC_VM86
15550 + if (!capable(CAP_SYS_RAWIO)) {
15551 + gr_handle_vm86();
15552 + ret = -EPERM;
15553 + goto out;
15554 + }
15555 +#endif
15556 +
15557 tsk = current;
15558 switch (cmd) {
15559 case VM86_REQUEST_IRQ:
15560 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15561 tsk->thread.saved_fs = info->regs32->fs;
15562 tsk->thread.saved_gs = get_user_gs(info->regs32);
15563
15564 - tss = &per_cpu(init_tss, get_cpu());
15565 + tss = init_tss + get_cpu();
15566 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15567 if (cpu_has_sep)
15568 tsk->thread.sysenter_cs = 0;
15569 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15570 goto cannot_handle;
15571 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15572 goto cannot_handle;
15573 - intr_ptr = (unsigned long __user *) (i << 2);
15574 + intr_ptr = (__force unsigned long __user *) (i << 2);
15575 if (get_user(segoffs, intr_ptr))
15576 goto cannot_handle;
15577 if ((segoffs >> 16) == BIOSSEG)
15578 diff -urNp linux-3.0.4/arch/x86/kernel/vmlinux.lds.S linux-3.0.4/arch/x86/kernel/vmlinux.lds.S
15579 --- linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15580 +++ linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15581 @@ -26,6 +26,13 @@
15582 #include <asm/page_types.h>
15583 #include <asm/cache.h>
15584 #include <asm/boot.h>
15585 +#include <asm/segment.h>
15586 +
15587 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15588 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15589 +#else
15590 +#define __KERNEL_TEXT_OFFSET 0
15591 +#endif
15592
15593 #undef i386 /* in case the preprocessor is a 32bit one */
15594
15595 @@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15596
15597 PHDRS {
15598 text PT_LOAD FLAGS(5); /* R_E */
15599 +#ifdef CONFIG_X86_32
15600 + module PT_LOAD FLAGS(5); /* R_E */
15601 +#endif
15602 +#ifdef CONFIG_XEN
15603 + rodata PT_LOAD FLAGS(5); /* R_E */
15604 +#else
15605 + rodata PT_LOAD FLAGS(4); /* R__ */
15606 +#endif
15607 data PT_LOAD FLAGS(6); /* RW_ */
15608 #ifdef CONFIG_X86_64
15609 user PT_LOAD FLAGS(5); /* R_E */
15610 +#endif
15611 + init.begin PT_LOAD FLAGS(6); /* RW_ */
15612 #ifdef CONFIG_SMP
15613 percpu PT_LOAD FLAGS(6); /* RW_ */
15614 #endif
15615 + text.init PT_LOAD FLAGS(5); /* R_E */
15616 + text.exit PT_LOAD FLAGS(5); /* R_E */
15617 init PT_LOAD FLAGS(7); /* RWE */
15618 -#endif
15619 note PT_NOTE FLAGS(0); /* ___ */
15620 }
15621
15622 SECTIONS
15623 {
15624 #ifdef CONFIG_X86_32
15625 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15626 - phys_startup_32 = startup_32 - LOAD_OFFSET;
15627 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15628 #else
15629 - . = __START_KERNEL;
15630 - phys_startup_64 = startup_64 - LOAD_OFFSET;
15631 + . = __START_KERNEL;
15632 #endif
15633
15634 /* Text and read-only data */
15635 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
15636 - _text = .;
15637 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15638 /* bootstrapping code */
15639 +#ifdef CONFIG_X86_32
15640 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15641 +#else
15642 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15643 +#endif
15644 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15645 + _text = .;
15646 HEAD_TEXT
15647 #ifdef CONFIG_X86_32
15648 . = ALIGN(PAGE_SIZE);
15649 @@ -109,13 +131,47 @@ SECTIONS
15650 IRQENTRY_TEXT
15651 *(.fixup)
15652 *(.gnu.warning)
15653 - /* End of text section */
15654 - _etext = .;
15655 } :text = 0x9090
15656
15657 - NOTES :text :note
15658 + . += __KERNEL_TEXT_OFFSET;
15659 +
15660 +#ifdef CONFIG_X86_32
15661 + . = ALIGN(PAGE_SIZE);
15662 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15663 +
15664 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15665 + MODULES_EXEC_VADDR = .;
15666 + BYTE(0)
15667 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15668 + . = ALIGN(HPAGE_SIZE);
15669 + MODULES_EXEC_END = . - 1;
15670 +#endif
15671 +
15672 + } :module
15673 +#endif
15674 +
15675 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15676 + /* End of text section */
15677 + _etext = . - __KERNEL_TEXT_OFFSET;
15678 + }
15679 +
15680 +#ifdef CONFIG_X86_32
15681 + . = ALIGN(PAGE_SIZE);
15682 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15683 + *(.idt)
15684 + . = ALIGN(PAGE_SIZE);
15685 + *(.empty_zero_page)
15686 + *(.initial_pg_fixmap)
15687 + *(.initial_pg_pmd)
15688 + *(.initial_page_table)
15689 + *(.swapper_pg_dir)
15690 + } :rodata
15691 +#endif
15692 +
15693 + . = ALIGN(PAGE_SIZE);
15694 + NOTES :rodata :note
15695
15696 - EXCEPTION_TABLE(16) :text = 0x9090
15697 + EXCEPTION_TABLE(16) :rodata
15698
15699 #if defined(CONFIG_DEBUG_RODATA)
15700 /* .text should occupy whole number of pages */
15701 @@ -127,16 +183,20 @@ SECTIONS
15702
15703 /* Data */
15704 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15705 +
15706 +#ifdef CONFIG_PAX_KERNEXEC
15707 + . = ALIGN(HPAGE_SIZE);
15708 +#else
15709 + . = ALIGN(PAGE_SIZE);
15710 +#endif
15711 +
15712 /* Start of data section */
15713 _sdata = .;
15714
15715 /* init_task */
15716 INIT_TASK_DATA(THREAD_SIZE)
15717
15718 -#ifdef CONFIG_X86_32
15719 - /* 32 bit has nosave before _edata */
15720 NOSAVE_DATA
15721 -#endif
15722
15723 PAGE_ALIGNED_DATA(PAGE_SIZE)
15724
15725 @@ -208,12 +268,19 @@ SECTIONS
15726 #endif /* CONFIG_X86_64 */
15727
15728 /* Init code and data - will be freed after init */
15729 - . = ALIGN(PAGE_SIZE);
15730 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15731 + BYTE(0)
15732 +
15733 +#ifdef CONFIG_PAX_KERNEXEC
15734 + . = ALIGN(HPAGE_SIZE);
15735 +#else
15736 + . = ALIGN(PAGE_SIZE);
15737 +#endif
15738 +
15739 __init_begin = .; /* paired with __init_end */
15740 - }
15741 + } :init.begin
15742
15743 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15744 +#ifdef CONFIG_SMP
15745 /*
15746 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15747 * output PHDR, so the next output section - .init.text - should
15748 @@ -222,12 +289,27 @@ SECTIONS
15749 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15750 #endif
15751
15752 - INIT_TEXT_SECTION(PAGE_SIZE)
15753 -#ifdef CONFIG_X86_64
15754 - :init
15755 -#endif
15756 + . = ALIGN(PAGE_SIZE);
15757 + init_begin = .;
15758 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15759 + VMLINUX_SYMBOL(_sinittext) = .;
15760 + INIT_TEXT
15761 + VMLINUX_SYMBOL(_einittext) = .;
15762 + . = ALIGN(PAGE_SIZE);
15763 + } :text.init
15764
15765 - INIT_DATA_SECTION(16)
15766 + /*
15767 + * .exit.text is discard at runtime, not link time, to deal with
15768 + * references from .altinstructions and .eh_frame
15769 + */
15770 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15771 + EXIT_TEXT
15772 + . = ALIGN(16);
15773 + } :text.exit
15774 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15775 +
15776 + . = ALIGN(PAGE_SIZE);
15777 + INIT_DATA_SECTION(16) :init
15778
15779 /*
15780 * Code and data for a variety of lowlevel trampolines, to be
15781 @@ -301,19 +383,12 @@ SECTIONS
15782 }
15783
15784 . = ALIGN(8);
15785 - /*
15786 - * .exit.text is discard at runtime, not link time, to deal with
15787 - * references from .altinstructions and .eh_frame
15788 - */
15789 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15790 - EXIT_TEXT
15791 - }
15792
15793 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15794 EXIT_DATA
15795 }
15796
15797 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15798 +#ifndef CONFIG_SMP
15799 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
15800 #endif
15801
15802 @@ -332,16 +407,10 @@ SECTIONS
15803 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15804 __smp_locks = .;
15805 *(.smp_locks)
15806 - . = ALIGN(PAGE_SIZE);
15807 __smp_locks_end = .;
15808 + . = ALIGN(PAGE_SIZE);
15809 }
15810
15811 -#ifdef CONFIG_X86_64
15812 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15813 - NOSAVE_DATA
15814 - }
15815 -#endif
15816 -
15817 /* BSS */
15818 . = ALIGN(PAGE_SIZE);
15819 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15820 @@ -357,6 +426,7 @@ SECTIONS
15821 __brk_base = .;
15822 . += 64 * 1024; /* 64k alignment slop space */
15823 *(.brk_reservation) /* areas brk users have reserved */
15824 + . = ALIGN(HPAGE_SIZE);
15825 __brk_limit = .;
15826 }
15827
15828 @@ -383,13 +453,12 @@ SECTIONS
15829 * for the boot processor.
15830 */
15831 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15832 -INIT_PER_CPU(gdt_page);
15833 INIT_PER_CPU(irq_stack_union);
15834
15835 /*
15836 * Build-time check on the image size:
15837 */
15838 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15839 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15840 "kernel image bigger than KERNEL_IMAGE_SIZE");
15841
15842 #ifdef CONFIG_SMP
15843 diff -urNp linux-3.0.4/arch/x86/kernel/vsyscall_64.c linux-3.0.4/arch/x86/kernel/vsyscall_64.c
15844 --- linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
15845 +++ linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
15846 @@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
15847 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15848 {
15849 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
15850 - .sysctl_enabled = 1,
15851 + .sysctl_enabled = 0,
15852 };
15853
15854 void update_vsyscall_tz(void)
15855 @@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
15856 static ctl_table kernel_table2[] = {
15857 { .procname = "vsyscall64",
15858 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
15859 - .mode = 0644,
15860 + .mode = 0444,
15861 .proc_handler = proc_dointvec },
15862 {}
15863 };
15864 diff -urNp linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c
15865 --- linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
15866 +++ linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
15867 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15868 EXPORT_SYMBOL(copy_user_generic_string);
15869 EXPORT_SYMBOL(copy_user_generic_unrolled);
15870 EXPORT_SYMBOL(__copy_user_nocache);
15871 -EXPORT_SYMBOL(_copy_from_user);
15872 -EXPORT_SYMBOL(_copy_to_user);
15873
15874 EXPORT_SYMBOL(copy_page);
15875 EXPORT_SYMBOL(clear_page);
15876 diff -urNp linux-3.0.4/arch/x86/kernel/xsave.c linux-3.0.4/arch/x86/kernel/xsave.c
15877 --- linux-3.0.4/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
15878 +++ linux-3.0.4/arch/x86/kernel/xsave.c 2011-08-23 21:47:55.000000000 -0400
15879 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15880 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15881 return -EINVAL;
15882
15883 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15884 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15885 fx_sw_user->extended_size -
15886 FP_XSTATE_MAGIC2_SIZE));
15887 if (err)
15888 @@ -267,7 +267,7 @@ fx_only:
15889 * the other extended state.
15890 */
15891 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15892 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15893 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15894 }
15895
15896 /*
15897 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15898 if (use_xsave())
15899 err = restore_user_xstate(buf);
15900 else
15901 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
15902 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
15903 buf);
15904 if (unlikely(err)) {
15905 /*
15906 diff -urNp linux-3.0.4/arch/x86/kvm/emulate.c linux-3.0.4/arch/x86/kvm/emulate.c
15907 --- linux-3.0.4/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
15908 +++ linux-3.0.4/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
15909 @@ -96,7 +96,7 @@
15910 #define Src2ImmByte (2<<29)
15911 #define Src2One (3<<29)
15912 #define Src2Imm (4<<29)
15913 -#define Src2Mask (7<<29)
15914 +#define Src2Mask (7U<<29)
15915
15916 #define X2(x...) x, x
15917 #define X3(x...) X2(x), x
15918 @@ -207,6 +207,7 @@ struct gprefix {
15919
15920 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15921 do { \
15922 + unsigned long _tmp; \
15923 __asm__ __volatile__ ( \
15924 _PRE_EFLAGS("0", "4", "2") \
15925 _op _suffix " %"_x"3,%1; " \
15926 @@ -220,8 +221,6 @@ struct gprefix {
15927 /* Raw emulation: instruction has two explicit operands. */
15928 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15929 do { \
15930 - unsigned long _tmp; \
15931 - \
15932 switch ((_dst).bytes) { \
15933 case 2: \
15934 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15935 @@ -237,7 +236,6 @@ struct gprefix {
15936
15937 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
15938 do { \
15939 - unsigned long _tmp; \
15940 switch ((_dst).bytes) { \
15941 case 1: \
15942 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
15943 diff -urNp linux-3.0.4/arch/x86/kvm/lapic.c linux-3.0.4/arch/x86/kvm/lapic.c
15944 --- linux-3.0.4/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
15945 +++ linux-3.0.4/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
15946 @@ -53,7 +53,7 @@
15947 #define APIC_BUS_CYCLE_NS 1
15948
15949 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
15950 -#define apic_debug(fmt, arg...)
15951 +#define apic_debug(fmt, arg...) do {} while (0)
15952
15953 #define APIC_LVT_NUM 6
15954 /* 14 is the version for Xeon and Pentium 8.4.8*/
15955 diff -urNp linux-3.0.4/arch/x86/kvm/mmu.c linux-3.0.4/arch/x86/kvm/mmu.c
15956 --- linux-3.0.4/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
15957 +++ linux-3.0.4/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
15958 @@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15959
15960 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
15961
15962 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
15963 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
15964
15965 /*
15966 * Assume that the pte write on a page table of the same type
15967 @@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15968 }
15969
15970 spin_lock(&vcpu->kvm->mmu_lock);
15971 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15972 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15973 gentry = 0;
15974 kvm_mmu_free_some_pages(vcpu);
15975 ++vcpu->kvm->stat.mmu_pte_write;
15976 diff -urNp linux-3.0.4/arch/x86/kvm/paging_tmpl.h linux-3.0.4/arch/x86/kvm/paging_tmpl.h
15977 --- linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
15978 +++ linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-08-23 21:48:14.000000000 -0400
15979 @@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
15980 unsigned long mmu_seq;
15981 bool map_writable;
15982
15983 + pax_track_stack();
15984 +
15985 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
15986
15987 r = mmu_topup_memory_caches(vcpu);
15988 @@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
15989 if (need_flush)
15990 kvm_flush_remote_tlbs(vcpu->kvm);
15991
15992 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
15993 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
15994
15995 spin_unlock(&vcpu->kvm->mmu_lock);
15996
15997 diff -urNp linux-3.0.4/arch/x86/kvm/svm.c linux-3.0.4/arch/x86/kvm/svm.c
15998 --- linux-3.0.4/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
15999 +++ linux-3.0.4/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16000 @@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16001 int cpu = raw_smp_processor_id();
16002
16003 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16004 +
16005 + pax_open_kernel();
16006 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16007 + pax_close_kernel();
16008 +
16009 load_TR_desc();
16010 }
16011
16012 @@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16013 #endif
16014 #endif
16015
16016 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16017 + __set_fs(current_thread_info()->addr_limit);
16018 +#endif
16019 +
16020 reload_tss(vcpu);
16021
16022 local_irq_disable();
16023 diff -urNp linux-3.0.4/arch/x86/kvm/vmx.c linux-3.0.4/arch/x86/kvm/vmx.c
16024 --- linux-3.0.4/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16025 +++ linux-3.0.4/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16026 @@ -797,7 +797,11 @@ static void reload_tss(void)
16027 struct desc_struct *descs;
16028
16029 descs = (void *)gdt->address;
16030 +
16031 + pax_open_kernel();
16032 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16033 + pax_close_kernel();
16034 +
16035 load_TR_desc();
16036 }
16037
16038 @@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16039 if (!cpu_has_vmx_flexpriority())
16040 flexpriority_enabled = 0;
16041
16042 - if (!cpu_has_vmx_tpr_shadow())
16043 - kvm_x86_ops->update_cr8_intercept = NULL;
16044 + if (!cpu_has_vmx_tpr_shadow()) {
16045 + pax_open_kernel();
16046 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16047 + pax_close_kernel();
16048 + }
16049
16050 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16051 kvm_disable_largepages();
16052 @@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16053 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16054
16055 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16056 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16057 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16058 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16059 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16060 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16061 @@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16062 "jmp .Lkvm_vmx_return \n\t"
16063 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16064 ".Lkvm_vmx_return: "
16065 +
16066 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16067 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16068 + ".Lkvm_vmx_return2: "
16069 +#endif
16070 +
16071 /* Save guest registers, load host registers, keep flags */
16072 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16073 "pop %0 \n\t"
16074 @@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16075 #endif
16076 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16077 [wordsize]"i"(sizeof(ulong))
16078 +
16079 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16080 + ,[cs]"i"(__KERNEL_CS)
16081 +#endif
16082 +
16083 : "cc", "memory"
16084 , R"ax", R"bx", R"di", R"si"
16085 #ifdef CONFIG_X86_64
16086 @@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16087
16088 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16089
16090 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16091 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16092 +
16093 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16094 + loadsegment(fs, __KERNEL_PERCPU);
16095 +#endif
16096 +
16097 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16098 + __set_fs(current_thread_info()->addr_limit);
16099 +#endif
16100 +
16101 vmx->launched = 1;
16102
16103 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16104 diff -urNp linux-3.0.4/arch/x86/kvm/x86.c linux-3.0.4/arch/x86/kvm/x86.c
16105 --- linux-3.0.4/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16106 +++ linux-3.0.4/arch/x86/kvm/x86.c 2011-08-23 21:47:55.000000000 -0400
16107 @@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16108 if (n < msr_list.nmsrs)
16109 goto out;
16110 r = -EFAULT;
16111 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16112 + goto out;
16113 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16114 num_msrs_to_save * sizeof(u32)))
16115 goto out;
16116 @@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16117 struct kvm_cpuid2 *cpuid,
16118 struct kvm_cpuid_entry2 __user *entries)
16119 {
16120 - int r;
16121 + int r, i;
16122
16123 r = -E2BIG;
16124 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16125 goto out;
16126 r = -EFAULT;
16127 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16128 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16129 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16130 goto out;
16131 + for (i = 0; i < cpuid->nent; ++i) {
16132 + struct kvm_cpuid_entry2 cpuid_entry;
16133 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16134 + goto out;
16135 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
16136 + }
16137 vcpu->arch.cpuid_nent = cpuid->nent;
16138 kvm_apic_set_version(vcpu);
16139 kvm_x86_ops->cpuid_update(vcpu);
16140 @@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16141 struct kvm_cpuid2 *cpuid,
16142 struct kvm_cpuid_entry2 __user *entries)
16143 {
16144 - int r;
16145 + int r, i;
16146
16147 r = -E2BIG;
16148 if (cpuid->nent < vcpu->arch.cpuid_nent)
16149 goto out;
16150 r = -EFAULT;
16151 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16152 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16153 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16154 goto out;
16155 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16156 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16157 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16158 + goto out;
16159 + }
16160 return 0;
16161
16162 out:
16163 @@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16164 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16165 struct kvm_interrupt *irq)
16166 {
16167 - if (irq->irq < 0 || irq->irq >= 256)
16168 + if (irq->irq >= 256)
16169 return -EINVAL;
16170 if (irqchip_in_kernel(vcpu->kvm))
16171 return -ENXIO;
16172 @@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16173 }
16174 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16175
16176 -int kvm_arch_init(void *opaque)
16177 +int kvm_arch_init(const void *opaque)
16178 {
16179 int r;
16180 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16181 diff -urNp linux-3.0.4/arch/x86/lguest/boot.c linux-3.0.4/arch/x86/lguest/boot.c
16182 --- linux-3.0.4/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16183 +++ linux-3.0.4/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16184 @@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16185 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16186 * Launcher to reboot us.
16187 */
16188 -static void lguest_restart(char *reason)
16189 +static __noreturn void lguest_restart(char *reason)
16190 {
16191 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16192 + BUG();
16193 }
16194
16195 /*G:050
16196 diff -urNp linux-3.0.4/arch/x86/lib/atomic64_32.c linux-3.0.4/arch/x86/lib/atomic64_32.c
16197 --- linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16198 +++ linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16199 @@ -8,18 +8,30 @@
16200
16201 long long atomic64_read_cx8(long long, const atomic64_t *v);
16202 EXPORT_SYMBOL(atomic64_read_cx8);
16203 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16204 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16205 long long atomic64_set_cx8(long long, const atomic64_t *v);
16206 EXPORT_SYMBOL(atomic64_set_cx8);
16207 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16208 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16209 long long atomic64_xchg_cx8(long long, unsigned high);
16210 EXPORT_SYMBOL(atomic64_xchg_cx8);
16211 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16212 EXPORT_SYMBOL(atomic64_add_return_cx8);
16213 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16214 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16215 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16216 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16217 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16218 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16219 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16220 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16221 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16222 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16223 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16224 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16225 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16226 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16227 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16228 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16229 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16230 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16231 #ifndef CONFIG_X86_CMPXCHG64
16232 long long atomic64_read_386(long long, const atomic64_t *v);
16233 EXPORT_SYMBOL(atomic64_read_386);
16234 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16235 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
16236 long long atomic64_set_386(long long, const atomic64_t *v);
16237 EXPORT_SYMBOL(atomic64_set_386);
16238 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16239 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
16240 long long atomic64_xchg_386(long long, unsigned high);
16241 EXPORT_SYMBOL(atomic64_xchg_386);
16242 long long atomic64_add_return_386(long long a, atomic64_t *v);
16243 EXPORT_SYMBOL(atomic64_add_return_386);
16244 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16245 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16246 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16247 EXPORT_SYMBOL(atomic64_sub_return_386);
16248 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16249 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16250 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16251 EXPORT_SYMBOL(atomic64_inc_return_386);
16252 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16253 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16254 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16255 EXPORT_SYMBOL(atomic64_dec_return_386);
16256 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16257 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16258 long long atomic64_add_386(long long a, atomic64_t *v);
16259 EXPORT_SYMBOL(atomic64_add_386);
16260 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16261 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
16262 long long atomic64_sub_386(long long a, atomic64_t *v);
16263 EXPORT_SYMBOL(atomic64_sub_386);
16264 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16265 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16266 long long atomic64_inc_386(long long a, atomic64_t *v);
16267 EXPORT_SYMBOL(atomic64_inc_386);
16268 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16269 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16270 long long atomic64_dec_386(long long a, atomic64_t *v);
16271 EXPORT_SYMBOL(atomic64_dec_386);
16272 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16273 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16274 long long atomic64_dec_if_positive_386(atomic64_t *v);
16275 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16276 int atomic64_inc_not_zero_386(atomic64_t *v);
16277 diff -urNp linux-3.0.4/arch/x86/lib/atomic64_386_32.S linux-3.0.4/arch/x86/lib/atomic64_386_32.S
16278 --- linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16279 +++ linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16280 @@ -48,6 +48,10 @@ BEGIN(read)
16281 movl (v), %eax
16282 movl 4(v), %edx
16283 RET_ENDP
16284 +BEGIN(read_unchecked)
16285 + movl (v), %eax
16286 + movl 4(v), %edx
16287 +RET_ENDP
16288 #undef v
16289
16290 #define v %esi
16291 @@ -55,6 +59,10 @@ BEGIN(set)
16292 movl %ebx, (v)
16293 movl %ecx, 4(v)
16294 RET_ENDP
16295 +BEGIN(set_unchecked)
16296 + movl %ebx, (v)
16297 + movl %ecx, 4(v)
16298 +RET_ENDP
16299 #undef v
16300
16301 #define v %esi
16302 @@ -70,6 +78,20 @@ RET_ENDP
16303 BEGIN(add)
16304 addl %eax, (v)
16305 adcl %edx, 4(v)
16306 +
16307 +#ifdef CONFIG_PAX_REFCOUNT
16308 + jno 0f
16309 + subl %eax, (v)
16310 + sbbl %edx, 4(v)
16311 + int $4
16312 +0:
16313 + _ASM_EXTABLE(0b, 0b)
16314 +#endif
16315 +
16316 +RET_ENDP
16317 +BEGIN(add_unchecked)
16318 + addl %eax, (v)
16319 + adcl %edx, 4(v)
16320 RET_ENDP
16321 #undef v
16322
16323 @@ -77,6 +99,24 @@ RET_ENDP
16324 BEGIN(add_return)
16325 addl (v), %eax
16326 adcl 4(v), %edx
16327 +
16328 +#ifdef CONFIG_PAX_REFCOUNT
16329 + into
16330 +1234:
16331 + _ASM_EXTABLE(1234b, 2f)
16332 +#endif
16333 +
16334 + movl %eax, (v)
16335 + movl %edx, 4(v)
16336 +
16337 +#ifdef CONFIG_PAX_REFCOUNT
16338 +2:
16339 +#endif
16340 +
16341 +RET_ENDP
16342 +BEGIN(add_return_unchecked)
16343 + addl (v), %eax
16344 + adcl 4(v), %edx
16345 movl %eax, (v)
16346 movl %edx, 4(v)
16347 RET_ENDP
16348 @@ -86,6 +126,20 @@ RET_ENDP
16349 BEGIN(sub)
16350 subl %eax, (v)
16351 sbbl %edx, 4(v)
16352 +
16353 +#ifdef CONFIG_PAX_REFCOUNT
16354 + jno 0f
16355 + addl %eax, (v)
16356 + adcl %edx, 4(v)
16357 + int $4
16358 +0:
16359 + _ASM_EXTABLE(0b, 0b)
16360 +#endif
16361 +
16362 +RET_ENDP
16363 +BEGIN(sub_unchecked)
16364 + subl %eax, (v)
16365 + sbbl %edx, 4(v)
16366 RET_ENDP
16367 #undef v
16368
16369 @@ -96,6 +150,27 @@ BEGIN(sub_return)
16370 sbbl $0, %edx
16371 addl (v), %eax
16372 adcl 4(v), %edx
16373 +
16374 +#ifdef CONFIG_PAX_REFCOUNT
16375 + into
16376 +1234:
16377 + _ASM_EXTABLE(1234b, 2f)
16378 +#endif
16379 +
16380 + movl %eax, (v)
16381 + movl %edx, 4(v)
16382 +
16383 +#ifdef CONFIG_PAX_REFCOUNT
16384 +2:
16385 +#endif
16386 +
16387 +RET_ENDP
16388 +BEGIN(sub_return_unchecked)
16389 + negl %edx
16390 + negl %eax
16391 + sbbl $0, %edx
16392 + addl (v), %eax
16393 + adcl 4(v), %edx
16394 movl %eax, (v)
16395 movl %edx, 4(v)
16396 RET_ENDP
16397 @@ -105,6 +180,20 @@ RET_ENDP
16398 BEGIN(inc)
16399 addl $1, (v)
16400 adcl $0, 4(v)
16401 +
16402 +#ifdef CONFIG_PAX_REFCOUNT
16403 + jno 0f
16404 + subl $1, (v)
16405 + sbbl $0, 4(v)
16406 + int $4
16407 +0:
16408 + _ASM_EXTABLE(0b, 0b)
16409 +#endif
16410 +
16411 +RET_ENDP
16412 +BEGIN(inc_unchecked)
16413 + addl $1, (v)
16414 + adcl $0, 4(v)
16415 RET_ENDP
16416 #undef v
16417
16418 @@ -114,6 +203,26 @@ BEGIN(inc_return)
16419 movl 4(v), %edx
16420 addl $1, %eax
16421 adcl $0, %edx
16422 +
16423 +#ifdef CONFIG_PAX_REFCOUNT
16424 + into
16425 +1234:
16426 + _ASM_EXTABLE(1234b, 2f)
16427 +#endif
16428 +
16429 + movl %eax, (v)
16430 + movl %edx, 4(v)
16431 +
16432 +#ifdef CONFIG_PAX_REFCOUNT
16433 +2:
16434 +#endif
16435 +
16436 +RET_ENDP
16437 +BEGIN(inc_return_unchecked)
16438 + movl (v), %eax
16439 + movl 4(v), %edx
16440 + addl $1, %eax
16441 + adcl $0, %edx
16442 movl %eax, (v)
16443 movl %edx, 4(v)
16444 RET_ENDP
16445 @@ -123,6 +232,20 @@ RET_ENDP
16446 BEGIN(dec)
16447 subl $1, (v)
16448 sbbl $0, 4(v)
16449 +
16450 +#ifdef CONFIG_PAX_REFCOUNT
16451 + jno 0f
16452 + addl $1, (v)
16453 + adcl $0, 4(v)
16454 + int $4
16455 +0:
16456 + _ASM_EXTABLE(0b, 0b)
16457 +#endif
16458 +
16459 +RET_ENDP
16460 +BEGIN(dec_unchecked)
16461 + subl $1, (v)
16462 + sbbl $0, 4(v)
16463 RET_ENDP
16464 #undef v
16465
16466 @@ -132,6 +255,26 @@ BEGIN(dec_return)
16467 movl 4(v), %edx
16468 subl $1, %eax
16469 sbbl $0, %edx
16470 +
16471 +#ifdef CONFIG_PAX_REFCOUNT
16472 + into
16473 +1234:
16474 + _ASM_EXTABLE(1234b, 2f)
16475 +#endif
16476 +
16477 + movl %eax, (v)
16478 + movl %edx, 4(v)
16479 +
16480 +#ifdef CONFIG_PAX_REFCOUNT
16481 +2:
16482 +#endif
16483 +
16484 +RET_ENDP
16485 +BEGIN(dec_return_unchecked)
16486 + movl (v), %eax
16487 + movl 4(v), %edx
16488 + subl $1, %eax
16489 + sbbl $0, %edx
16490 movl %eax, (v)
16491 movl %edx, 4(v)
16492 RET_ENDP
16493 @@ -143,6 +286,13 @@ BEGIN(add_unless)
16494 adcl %edx, %edi
16495 addl (v), %eax
16496 adcl 4(v), %edx
16497 +
16498 +#ifdef CONFIG_PAX_REFCOUNT
16499 + into
16500 +1234:
16501 + _ASM_EXTABLE(1234b, 2f)
16502 +#endif
16503 +
16504 cmpl %eax, %esi
16505 je 3f
16506 1:
16507 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16508 1:
16509 addl $1, %eax
16510 adcl $0, %edx
16511 +
16512 +#ifdef CONFIG_PAX_REFCOUNT
16513 + into
16514 +1234:
16515 + _ASM_EXTABLE(1234b, 2f)
16516 +#endif
16517 +
16518 movl %eax, (v)
16519 movl %edx, 4(v)
16520 movl $1, %eax
16521 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16522 movl 4(v), %edx
16523 subl $1, %eax
16524 sbbl $0, %edx
16525 +
16526 +#ifdef CONFIG_PAX_REFCOUNT
16527 + into
16528 +1234:
16529 + _ASM_EXTABLE(1234b, 1f)
16530 +#endif
16531 +
16532 js 1f
16533 movl %eax, (v)
16534 movl %edx, 4(v)
16535 diff -urNp linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S
16536 --- linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16537 +++ linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-08-23 21:47:55.000000000 -0400
16538 @@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16539 CFI_ENDPROC
16540 ENDPROC(atomic64_read_cx8)
16541
16542 +ENTRY(atomic64_read_unchecked_cx8)
16543 + CFI_STARTPROC
16544 +
16545 + read64 %ecx
16546 + ret
16547 + CFI_ENDPROC
16548 +ENDPROC(atomic64_read_unchecked_cx8)
16549 +
16550 ENTRY(atomic64_set_cx8)
16551 CFI_STARTPROC
16552
16553 @@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16554 CFI_ENDPROC
16555 ENDPROC(atomic64_set_cx8)
16556
16557 +ENTRY(atomic64_set_unchecked_cx8)
16558 + CFI_STARTPROC
16559 +
16560 +1:
16561 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
16562 + * are atomic on 586 and newer */
16563 + cmpxchg8b (%esi)
16564 + jne 1b
16565 +
16566 + ret
16567 + CFI_ENDPROC
16568 +ENDPROC(atomic64_set_unchecked_cx8)
16569 +
16570 ENTRY(atomic64_xchg_cx8)
16571 CFI_STARTPROC
16572
16573 @@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16574 CFI_ENDPROC
16575 ENDPROC(atomic64_xchg_cx8)
16576
16577 -.macro addsub_return func ins insc
16578 -ENTRY(atomic64_\func\()_return_cx8)
16579 +.macro addsub_return func ins insc unchecked=""
16580 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16581 CFI_STARTPROC
16582 SAVE ebp
16583 SAVE ebx
16584 @@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16585 movl %edx, %ecx
16586 \ins\()l %esi, %ebx
16587 \insc\()l %edi, %ecx
16588 +
16589 +.ifb \unchecked
16590 +#ifdef CONFIG_PAX_REFCOUNT
16591 + into
16592 +2:
16593 + _ASM_EXTABLE(2b, 3f)
16594 +#endif
16595 +.endif
16596 +
16597 LOCK_PREFIX
16598 cmpxchg8b (%ebp)
16599 jne 1b
16600 -
16601 -10:
16602 movl %ebx, %eax
16603 movl %ecx, %edx
16604 +
16605 +.ifb \unchecked
16606 +#ifdef CONFIG_PAX_REFCOUNT
16607 +3:
16608 +#endif
16609 +.endif
16610 +
16611 RESTORE edi
16612 RESTORE esi
16613 RESTORE ebx
16614 RESTORE ebp
16615 ret
16616 CFI_ENDPROC
16617 -ENDPROC(atomic64_\func\()_return_cx8)
16618 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16619 .endm
16620
16621 addsub_return add add adc
16622 addsub_return sub sub sbb
16623 +addsub_return add add adc _unchecked
16624 +addsub_return sub sub sbb _unchecked
16625
16626 -.macro incdec_return func ins insc
16627 -ENTRY(atomic64_\func\()_return_cx8)
16628 +.macro incdec_return func ins insc unchecked
16629 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16630 CFI_STARTPROC
16631 SAVE ebx
16632
16633 @@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16634 movl %edx, %ecx
16635 \ins\()l $1, %ebx
16636 \insc\()l $0, %ecx
16637 +
16638 +.ifb \unchecked
16639 +#ifdef CONFIG_PAX_REFCOUNT
16640 + into
16641 +2:
16642 + _ASM_EXTABLE(2b, 3f)
16643 +#endif
16644 +.endif
16645 +
16646 LOCK_PREFIX
16647 cmpxchg8b (%esi)
16648 jne 1b
16649
16650 -10:
16651 movl %ebx, %eax
16652 movl %ecx, %edx
16653 +
16654 +.ifb \unchecked
16655 +#ifdef CONFIG_PAX_REFCOUNT
16656 +3:
16657 +#endif
16658 +.endif
16659 +
16660 RESTORE ebx
16661 ret
16662 CFI_ENDPROC
16663 -ENDPROC(atomic64_\func\()_return_cx8)
16664 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16665 .endm
16666
16667 incdec_return inc add adc
16668 incdec_return dec sub sbb
16669 +incdec_return inc add adc _unchecked
16670 +incdec_return dec sub sbb _unchecked
16671
16672 ENTRY(atomic64_dec_if_positive_cx8)
16673 CFI_STARTPROC
16674 @@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16675 movl %edx, %ecx
16676 subl $1, %ebx
16677 sbb $0, %ecx
16678 +
16679 +#ifdef CONFIG_PAX_REFCOUNT
16680 + into
16681 +1234:
16682 + _ASM_EXTABLE(1234b, 2f)
16683 +#endif
16684 +
16685 js 2f
16686 LOCK_PREFIX
16687 cmpxchg8b (%esi)
16688 @@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16689 movl %edx, %ecx
16690 addl %esi, %ebx
16691 adcl %edi, %ecx
16692 +
16693 +#ifdef CONFIG_PAX_REFCOUNT
16694 + into
16695 +1234:
16696 + _ASM_EXTABLE(1234b, 3f)
16697 +#endif
16698 +
16699 LOCK_PREFIX
16700 cmpxchg8b (%ebp)
16701 jne 1b
16702 @@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16703 movl %edx, %ecx
16704 addl $1, %ebx
16705 adcl $0, %ecx
16706 +
16707 +#ifdef CONFIG_PAX_REFCOUNT
16708 + into
16709 +1234:
16710 + _ASM_EXTABLE(1234b, 3f)
16711 +#endif
16712 +
16713 LOCK_PREFIX
16714 cmpxchg8b (%esi)
16715 jne 1b
16716 diff -urNp linux-3.0.4/arch/x86/lib/checksum_32.S linux-3.0.4/arch/x86/lib/checksum_32.S
16717 --- linux-3.0.4/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
16718 +++ linux-3.0.4/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
16719 @@ -28,7 +28,8 @@
16720 #include <linux/linkage.h>
16721 #include <asm/dwarf2.h>
16722 #include <asm/errno.h>
16723 -
16724 +#include <asm/segment.h>
16725 +
16726 /*
16727 * computes a partial checksum, e.g. for TCP/UDP fragments
16728 */
16729 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16730
16731 #define ARGBASE 16
16732 #define FP 12
16733 -
16734 -ENTRY(csum_partial_copy_generic)
16735 +
16736 +ENTRY(csum_partial_copy_generic_to_user)
16737 CFI_STARTPROC
16738 +
16739 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16740 + pushl_cfi %gs
16741 + popl_cfi %es
16742 + jmp csum_partial_copy_generic
16743 +#endif
16744 +
16745 +ENTRY(csum_partial_copy_generic_from_user)
16746 +
16747 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16748 + pushl_cfi %gs
16749 + popl_cfi %ds
16750 +#endif
16751 +
16752 +ENTRY(csum_partial_copy_generic)
16753 subl $4,%esp
16754 CFI_ADJUST_CFA_OFFSET 4
16755 pushl_cfi %edi
16756 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16757 jmp 4f
16758 SRC(1: movw (%esi), %bx )
16759 addl $2, %esi
16760 -DST( movw %bx, (%edi) )
16761 +DST( movw %bx, %es:(%edi) )
16762 addl $2, %edi
16763 addw %bx, %ax
16764 adcl $0, %eax
16765 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16766 SRC(1: movl (%esi), %ebx )
16767 SRC( movl 4(%esi), %edx )
16768 adcl %ebx, %eax
16769 -DST( movl %ebx, (%edi) )
16770 +DST( movl %ebx, %es:(%edi) )
16771 adcl %edx, %eax
16772 -DST( movl %edx, 4(%edi) )
16773 +DST( movl %edx, %es:4(%edi) )
16774
16775 SRC( movl 8(%esi), %ebx )
16776 SRC( movl 12(%esi), %edx )
16777 adcl %ebx, %eax
16778 -DST( movl %ebx, 8(%edi) )
16779 +DST( movl %ebx, %es:8(%edi) )
16780 adcl %edx, %eax
16781 -DST( movl %edx, 12(%edi) )
16782 +DST( movl %edx, %es:12(%edi) )
16783
16784 SRC( movl 16(%esi), %ebx )
16785 SRC( movl 20(%esi), %edx )
16786 adcl %ebx, %eax
16787 -DST( movl %ebx, 16(%edi) )
16788 +DST( movl %ebx, %es:16(%edi) )
16789 adcl %edx, %eax
16790 -DST( movl %edx, 20(%edi) )
16791 +DST( movl %edx, %es:20(%edi) )
16792
16793 SRC( movl 24(%esi), %ebx )
16794 SRC( movl 28(%esi), %edx )
16795 adcl %ebx, %eax
16796 -DST( movl %ebx, 24(%edi) )
16797 +DST( movl %ebx, %es:24(%edi) )
16798 adcl %edx, %eax
16799 -DST( movl %edx, 28(%edi) )
16800 +DST( movl %edx, %es:28(%edi) )
16801
16802 lea 32(%esi), %esi
16803 lea 32(%edi), %edi
16804 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16805 shrl $2, %edx # This clears CF
16806 SRC(3: movl (%esi), %ebx )
16807 adcl %ebx, %eax
16808 -DST( movl %ebx, (%edi) )
16809 +DST( movl %ebx, %es:(%edi) )
16810 lea 4(%esi), %esi
16811 lea 4(%edi), %edi
16812 dec %edx
16813 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16814 jb 5f
16815 SRC( movw (%esi), %cx )
16816 leal 2(%esi), %esi
16817 -DST( movw %cx, (%edi) )
16818 +DST( movw %cx, %es:(%edi) )
16819 leal 2(%edi), %edi
16820 je 6f
16821 shll $16,%ecx
16822 SRC(5: movb (%esi), %cl )
16823 -DST( movb %cl, (%edi) )
16824 +DST( movb %cl, %es:(%edi) )
16825 6: addl %ecx, %eax
16826 adcl $0, %eax
16827 7:
16828 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16829
16830 6001:
16831 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16832 - movl $-EFAULT, (%ebx)
16833 + movl $-EFAULT, %ss:(%ebx)
16834
16835 # zero the complete destination - computing the rest
16836 # is too much work
16837 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16838
16839 6002:
16840 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16841 - movl $-EFAULT,(%ebx)
16842 + movl $-EFAULT,%ss:(%ebx)
16843 jmp 5000b
16844
16845 .previous
16846
16847 + pushl_cfi %ss
16848 + popl_cfi %ds
16849 + pushl_cfi %ss
16850 + popl_cfi %es
16851 popl_cfi %ebx
16852 CFI_RESTORE ebx
16853 popl_cfi %esi
16854 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16855 popl_cfi %ecx # equivalent to addl $4,%esp
16856 ret
16857 CFI_ENDPROC
16858 -ENDPROC(csum_partial_copy_generic)
16859 +ENDPROC(csum_partial_copy_generic_to_user)
16860
16861 #else
16862
16863 /* Version for PentiumII/PPro */
16864
16865 #define ROUND1(x) \
16866 + nop; nop; nop; \
16867 SRC(movl x(%esi), %ebx ) ; \
16868 addl %ebx, %eax ; \
16869 - DST(movl %ebx, x(%edi) ) ;
16870 + DST(movl %ebx, %es:x(%edi)) ;
16871
16872 #define ROUND(x) \
16873 + nop; nop; nop; \
16874 SRC(movl x(%esi), %ebx ) ; \
16875 adcl %ebx, %eax ; \
16876 - DST(movl %ebx, x(%edi) ) ;
16877 + DST(movl %ebx, %es:x(%edi)) ;
16878
16879 #define ARGBASE 12
16880 -
16881 -ENTRY(csum_partial_copy_generic)
16882 +
16883 +ENTRY(csum_partial_copy_generic_to_user)
16884 CFI_STARTPROC
16885 +
16886 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16887 + pushl_cfi %gs
16888 + popl_cfi %es
16889 + jmp csum_partial_copy_generic
16890 +#endif
16891 +
16892 +ENTRY(csum_partial_copy_generic_from_user)
16893 +
16894 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16895 + pushl_cfi %gs
16896 + popl_cfi %ds
16897 +#endif
16898 +
16899 +ENTRY(csum_partial_copy_generic)
16900 pushl_cfi %ebx
16901 CFI_REL_OFFSET ebx, 0
16902 pushl_cfi %edi
16903 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16904 subl %ebx, %edi
16905 lea -1(%esi),%edx
16906 andl $-32,%edx
16907 - lea 3f(%ebx,%ebx), %ebx
16908 + lea 3f(%ebx,%ebx,2), %ebx
16909 testl %esi, %esi
16910 jmp *%ebx
16911 1: addl $64,%esi
16912 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16913 jb 5f
16914 SRC( movw (%esi), %dx )
16915 leal 2(%esi), %esi
16916 -DST( movw %dx, (%edi) )
16917 +DST( movw %dx, %es:(%edi) )
16918 leal 2(%edi), %edi
16919 je 6f
16920 shll $16,%edx
16921 5:
16922 SRC( movb (%esi), %dl )
16923 -DST( movb %dl, (%edi) )
16924 +DST( movb %dl, %es:(%edi) )
16925 6: addl %edx, %eax
16926 adcl $0, %eax
16927 7:
16928 .section .fixup, "ax"
16929 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
16930 - movl $-EFAULT, (%ebx)
16931 + movl $-EFAULT, %ss:(%ebx)
16932 # zero the complete destination (computing the rest is too much work)
16933 movl ARGBASE+8(%esp),%edi # dst
16934 movl ARGBASE+12(%esp),%ecx # len
16935 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
16936 rep; stosb
16937 jmp 7b
16938 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16939 - movl $-EFAULT, (%ebx)
16940 + movl $-EFAULT, %ss:(%ebx)
16941 jmp 7b
16942 .previous
16943
16944 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16945 + pushl_cfi %ss
16946 + popl_cfi %ds
16947 + pushl_cfi %ss
16948 + popl_cfi %es
16949 +#endif
16950 +
16951 popl_cfi %esi
16952 CFI_RESTORE esi
16953 popl_cfi %edi
16954 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
16955 CFI_RESTORE ebx
16956 ret
16957 CFI_ENDPROC
16958 -ENDPROC(csum_partial_copy_generic)
16959 +ENDPROC(csum_partial_copy_generic_to_user)
16960
16961 #undef ROUND
16962 #undef ROUND1
16963 diff -urNp linux-3.0.4/arch/x86/lib/clear_page_64.S linux-3.0.4/arch/x86/lib/clear_page_64.S
16964 --- linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
16965 +++ linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-08-23 21:47:55.000000000 -0400
16966 @@ -58,7 +58,7 @@ ENDPROC(clear_page)
16967
16968 #include <asm/cpufeature.h>
16969
16970 - .section .altinstr_replacement,"ax"
16971 + .section .altinstr_replacement,"a"
16972 1: .byte 0xeb /* jmp <disp8> */
16973 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
16974 2: .byte 0xeb /* jmp <disp8> */
16975 diff -urNp linux-3.0.4/arch/x86/lib/copy_page_64.S linux-3.0.4/arch/x86/lib/copy_page_64.S
16976 --- linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
16977 +++ linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-08-23 21:47:55.000000000 -0400
16978 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
16979
16980 #include <asm/cpufeature.h>
16981
16982 - .section .altinstr_replacement,"ax"
16983 + .section .altinstr_replacement,"a"
16984 1: .byte 0xeb /* jmp <disp8> */
16985 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
16986 2:
16987 diff -urNp linux-3.0.4/arch/x86/lib/copy_user_64.S linux-3.0.4/arch/x86/lib/copy_user_64.S
16988 --- linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
16989 +++ linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-08-23 21:47:55.000000000 -0400
16990 @@ -16,6 +16,7 @@
16991 #include <asm/thread_info.h>
16992 #include <asm/cpufeature.h>
16993 #include <asm/alternative-asm.h>
16994 +#include <asm/pgtable.h>
16995
16996 /*
16997 * By placing feature2 after feature1 in altinstructions section, we logically
16998 @@ -29,7 +30,7 @@
16999 .byte 0xe9 /* 32bit jump */
17000 .long \orig-1f /* by default jump to orig */
17001 1:
17002 - .section .altinstr_replacement,"ax"
17003 + .section .altinstr_replacement,"a"
17004 2: .byte 0xe9 /* near jump with 32bit immediate */
17005 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17006 3: .byte 0xe9 /* near jump with 32bit immediate */
17007 @@ -71,41 +72,13 @@
17008 #endif
17009 .endm
17010
17011 -/* Standard copy_to_user with segment limit checking */
17012 -ENTRY(_copy_to_user)
17013 - CFI_STARTPROC
17014 - GET_THREAD_INFO(%rax)
17015 - movq %rdi,%rcx
17016 - addq %rdx,%rcx
17017 - jc bad_to_user
17018 - cmpq TI_addr_limit(%rax),%rcx
17019 - ja bad_to_user
17020 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17021 - copy_user_generic_unrolled,copy_user_generic_string, \
17022 - copy_user_enhanced_fast_string
17023 - CFI_ENDPROC
17024 -ENDPROC(_copy_to_user)
17025 -
17026 -/* Standard copy_from_user with segment limit checking */
17027 -ENTRY(_copy_from_user)
17028 - CFI_STARTPROC
17029 - GET_THREAD_INFO(%rax)
17030 - movq %rsi,%rcx
17031 - addq %rdx,%rcx
17032 - jc bad_from_user
17033 - cmpq TI_addr_limit(%rax),%rcx
17034 - ja bad_from_user
17035 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17036 - copy_user_generic_unrolled,copy_user_generic_string, \
17037 - copy_user_enhanced_fast_string
17038 - CFI_ENDPROC
17039 -ENDPROC(_copy_from_user)
17040 -
17041 .section .fixup,"ax"
17042 /* must zero dest */
17043 ENTRY(bad_from_user)
17044 bad_from_user:
17045 CFI_STARTPROC
17046 + testl %edx,%edx
17047 + js bad_to_user
17048 movl %edx,%ecx
17049 xorl %eax,%eax
17050 rep
17051 diff -urNp linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S
17052 --- linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17053 +++ linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-08-23 21:47:55.000000000 -0400
17054 @@ -14,6 +14,7 @@
17055 #include <asm/current.h>
17056 #include <asm/asm-offsets.h>
17057 #include <asm/thread_info.h>
17058 +#include <asm/pgtable.h>
17059
17060 .macro ALIGN_DESTINATION
17061 #ifdef FIX_ALIGNMENT
17062 @@ -50,6 +51,15 @@
17063 */
17064 ENTRY(__copy_user_nocache)
17065 CFI_STARTPROC
17066 +
17067 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17068 + mov $PAX_USER_SHADOW_BASE,%rcx
17069 + cmp %rcx,%rsi
17070 + jae 1f
17071 + add %rcx,%rsi
17072 +1:
17073 +#endif
17074 +
17075 cmpl $8,%edx
17076 jb 20f /* less then 8 bytes, go to byte copy loop */
17077 ALIGN_DESTINATION
17078 diff -urNp linux-3.0.4/arch/x86/lib/csum-wrappers_64.c linux-3.0.4/arch/x86/lib/csum-wrappers_64.c
17079 --- linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17080 +++ linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-08-23 21:47:55.000000000 -0400
17081 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17082 len -= 2;
17083 }
17084 }
17085 +
17086 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17087 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17088 + src += PAX_USER_SHADOW_BASE;
17089 +#endif
17090 +
17091 isum = csum_partial_copy_generic((__force const void *)src,
17092 dst, len, isum, errp, NULL);
17093 if (unlikely(*errp))
17094 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17095 }
17096
17097 *errp = 0;
17098 +
17099 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17100 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17101 + dst += PAX_USER_SHADOW_BASE;
17102 +#endif
17103 +
17104 return csum_partial_copy_generic(src, (void __force *)dst,
17105 len, isum, NULL, errp);
17106 }
17107 diff -urNp linux-3.0.4/arch/x86/lib/getuser.S linux-3.0.4/arch/x86/lib/getuser.S
17108 --- linux-3.0.4/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17109 +++ linux-3.0.4/arch/x86/lib/getuser.S 2011-08-23 21:47:55.000000000 -0400
17110 @@ -33,14 +33,35 @@
17111 #include <asm/asm-offsets.h>
17112 #include <asm/thread_info.h>
17113 #include <asm/asm.h>
17114 +#include <asm/segment.h>
17115 +#include <asm/pgtable.h>
17116 +
17117 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17118 +#define __copyuser_seg gs;
17119 +#else
17120 +#define __copyuser_seg
17121 +#endif
17122
17123 .text
17124 ENTRY(__get_user_1)
17125 CFI_STARTPROC
17126 +
17127 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17128 GET_THREAD_INFO(%_ASM_DX)
17129 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17130 jae bad_get_user
17131 -1: movzb (%_ASM_AX),%edx
17132 +
17133 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17134 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17135 + cmp %_ASM_DX,%_ASM_AX
17136 + jae 1234f
17137 + add %_ASM_DX,%_ASM_AX
17138 +1234:
17139 +#endif
17140 +
17141 +#endif
17142 +
17143 +1: __copyuser_seg movzb (%_ASM_AX),%edx
17144 xor %eax,%eax
17145 ret
17146 CFI_ENDPROC
17147 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17148 ENTRY(__get_user_2)
17149 CFI_STARTPROC
17150 add $1,%_ASM_AX
17151 +
17152 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17153 jc bad_get_user
17154 GET_THREAD_INFO(%_ASM_DX)
17155 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17156 jae bad_get_user
17157 -2: movzwl -1(%_ASM_AX),%edx
17158 +
17159 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17160 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17161 + cmp %_ASM_DX,%_ASM_AX
17162 + jae 1234f
17163 + add %_ASM_DX,%_ASM_AX
17164 +1234:
17165 +#endif
17166 +
17167 +#endif
17168 +
17169 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17170 xor %eax,%eax
17171 ret
17172 CFI_ENDPROC
17173 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17174 ENTRY(__get_user_4)
17175 CFI_STARTPROC
17176 add $3,%_ASM_AX
17177 +
17178 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17179 jc bad_get_user
17180 GET_THREAD_INFO(%_ASM_DX)
17181 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17182 jae bad_get_user
17183 -3: mov -3(%_ASM_AX),%edx
17184 +
17185 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17186 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17187 + cmp %_ASM_DX,%_ASM_AX
17188 + jae 1234f
17189 + add %_ASM_DX,%_ASM_AX
17190 +1234:
17191 +#endif
17192 +
17193 +#endif
17194 +
17195 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
17196 xor %eax,%eax
17197 ret
17198 CFI_ENDPROC
17199 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17200 GET_THREAD_INFO(%_ASM_DX)
17201 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17202 jae bad_get_user
17203 +
17204 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17205 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17206 + cmp %_ASM_DX,%_ASM_AX
17207 + jae 1234f
17208 + add %_ASM_DX,%_ASM_AX
17209 +1234:
17210 +#endif
17211 +
17212 4: movq -7(%_ASM_AX),%_ASM_DX
17213 xor %eax,%eax
17214 ret
17215 diff -urNp linux-3.0.4/arch/x86/lib/insn.c linux-3.0.4/arch/x86/lib/insn.c
17216 --- linux-3.0.4/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17217 +++ linux-3.0.4/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17218 @@ -21,6 +21,11 @@
17219 #include <linux/string.h>
17220 #include <asm/inat.h>
17221 #include <asm/insn.h>
17222 +#ifdef __KERNEL__
17223 +#include <asm/pgtable_types.h>
17224 +#else
17225 +#define ktla_ktva(addr) addr
17226 +#endif
17227
17228 #define get_next(t, insn) \
17229 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17230 @@ -40,8 +45,8 @@
17231 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17232 {
17233 memset(insn, 0, sizeof(*insn));
17234 - insn->kaddr = kaddr;
17235 - insn->next_byte = kaddr;
17236 + insn->kaddr = ktla_ktva(kaddr);
17237 + insn->next_byte = ktla_ktva(kaddr);
17238 insn->x86_64 = x86_64 ? 1 : 0;
17239 insn->opnd_bytes = 4;
17240 if (x86_64)
17241 diff -urNp linux-3.0.4/arch/x86/lib/mmx_32.c linux-3.0.4/arch/x86/lib/mmx_32.c
17242 --- linux-3.0.4/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17243 +++ linux-3.0.4/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17244 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17245 {
17246 void *p;
17247 int i;
17248 + unsigned long cr0;
17249
17250 if (unlikely(in_interrupt()))
17251 return __memcpy(to, from, len);
17252 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17253 kernel_fpu_begin();
17254
17255 __asm__ __volatile__ (
17256 - "1: prefetch (%0)\n" /* This set is 28 bytes */
17257 - " prefetch 64(%0)\n"
17258 - " prefetch 128(%0)\n"
17259 - " prefetch 192(%0)\n"
17260 - " prefetch 256(%0)\n"
17261 + "1: prefetch (%1)\n" /* This set is 28 bytes */
17262 + " prefetch 64(%1)\n"
17263 + " prefetch 128(%1)\n"
17264 + " prefetch 192(%1)\n"
17265 + " prefetch 256(%1)\n"
17266 "2: \n"
17267 ".section .fixup, \"ax\"\n"
17268 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17269 + "3: \n"
17270 +
17271 +#ifdef CONFIG_PAX_KERNEXEC
17272 + " movl %%cr0, %0\n"
17273 + " movl %0, %%eax\n"
17274 + " andl $0xFFFEFFFF, %%eax\n"
17275 + " movl %%eax, %%cr0\n"
17276 +#endif
17277 +
17278 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17279 +
17280 +#ifdef CONFIG_PAX_KERNEXEC
17281 + " movl %0, %%cr0\n"
17282 +#endif
17283 +
17284 " jmp 2b\n"
17285 ".previous\n"
17286 _ASM_EXTABLE(1b, 3b)
17287 - : : "r" (from));
17288 + : "=&r" (cr0) : "r" (from) : "ax");
17289
17290 for ( ; i > 5; i--) {
17291 __asm__ __volatile__ (
17292 - "1: prefetch 320(%0)\n"
17293 - "2: movq (%0), %%mm0\n"
17294 - " movq 8(%0), %%mm1\n"
17295 - " movq 16(%0), %%mm2\n"
17296 - " movq 24(%0), %%mm3\n"
17297 - " movq %%mm0, (%1)\n"
17298 - " movq %%mm1, 8(%1)\n"
17299 - " movq %%mm2, 16(%1)\n"
17300 - " movq %%mm3, 24(%1)\n"
17301 - " movq 32(%0), %%mm0\n"
17302 - " movq 40(%0), %%mm1\n"
17303 - " movq 48(%0), %%mm2\n"
17304 - " movq 56(%0), %%mm3\n"
17305 - " movq %%mm0, 32(%1)\n"
17306 - " movq %%mm1, 40(%1)\n"
17307 - " movq %%mm2, 48(%1)\n"
17308 - " movq %%mm3, 56(%1)\n"
17309 + "1: prefetch 320(%1)\n"
17310 + "2: movq (%1), %%mm0\n"
17311 + " movq 8(%1), %%mm1\n"
17312 + " movq 16(%1), %%mm2\n"
17313 + " movq 24(%1), %%mm3\n"
17314 + " movq %%mm0, (%2)\n"
17315 + " movq %%mm1, 8(%2)\n"
17316 + " movq %%mm2, 16(%2)\n"
17317 + " movq %%mm3, 24(%2)\n"
17318 + " movq 32(%1), %%mm0\n"
17319 + " movq 40(%1), %%mm1\n"
17320 + " movq 48(%1), %%mm2\n"
17321 + " movq 56(%1), %%mm3\n"
17322 + " movq %%mm0, 32(%2)\n"
17323 + " movq %%mm1, 40(%2)\n"
17324 + " movq %%mm2, 48(%2)\n"
17325 + " movq %%mm3, 56(%2)\n"
17326 ".section .fixup, \"ax\"\n"
17327 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17328 + "3:\n"
17329 +
17330 +#ifdef CONFIG_PAX_KERNEXEC
17331 + " movl %%cr0, %0\n"
17332 + " movl %0, %%eax\n"
17333 + " andl $0xFFFEFFFF, %%eax\n"
17334 + " movl %%eax, %%cr0\n"
17335 +#endif
17336 +
17337 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17338 +
17339 +#ifdef CONFIG_PAX_KERNEXEC
17340 + " movl %0, %%cr0\n"
17341 +#endif
17342 +
17343 " jmp 2b\n"
17344 ".previous\n"
17345 _ASM_EXTABLE(1b, 3b)
17346 - : : "r" (from), "r" (to) : "memory");
17347 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17348
17349 from += 64;
17350 to += 64;
17351 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17352 static void fast_copy_page(void *to, void *from)
17353 {
17354 int i;
17355 + unsigned long cr0;
17356
17357 kernel_fpu_begin();
17358
17359 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17360 * but that is for later. -AV
17361 */
17362 __asm__ __volatile__(
17363 - "1: prefetch (%0)\n"
17364 - " prefetch 64(%0)\n"
17365 - " prefetch 128(%0)\n"
17366 - " prefetch 192(%0)\n"
17367 - " prefetch 256(%0)\n"
17368 + "1: prefetch (%1)\n"
17369 + " prefetch 64(%1)\n"
17370 + " prefetch 128(%1)\n"
17371 + " prefetch 192(%1)\n"
17372 + " prefetch 256(%1)\n"
17373 "2: \n"
17374 ".section .fixup, \"ax\"\n"
17375 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17376 + "3: \n"
17377 +
17378 +#ifdef CONFIG_PAX_KERNEXEC
17379 + " movl %%cr0, %0\n"
17380 + " movl %0, %%eax\n"
17381 + " andl $0xFFFEFFFF, %%eax\n"
17382 + " movl %%eax, %%cr0\n"
17383 +#endif
17384 +
17385 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17386 +
17387 +#ifdef CONFIG_PAX_KERNEXEC
17388 + " movl %0, %%cr0\n"
17389 +#endif
17390 +
17391 " jmp 2b\n"
17392 ".previous\n"
17393 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17394 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17395
17396 for (i = 0; i < (4096-320)/64; i++) {
17397 __asm__ __volatile__ (
17398 - "1: prefetch 320(%0)\n"
17399 - "2: movq (%0), %%mm0\n"
17400 - " movntq %%mm0, (%1)\n"
17401 - " movq 8(%0), %%mm1\n"
17402 - " movntq %%mm1, 8(%1)\n"
17403 - " movq 16(%0), %%mm2\n"
17404 - " movntq %%mm2, 16(%1)\n"
17405 - " movq 24(%0), %%mm3\n"
17406 - " movntq %%mm3, 24(%1)\n"
17407 - " movq 32(%0), %%mm4\n"
17408 - " movntq %%mm4, 32(%1)\n"
17409 - " movq 40(%0), %%mm5\n"
17410 - " movntq %%mm5, 40(%1)\n"
17411 - " movq 48(%0), %%mm6\n"
17412 - " movntq %%mm6, 48(%1)\n"
17413 - " movq 56(%0), %%mm7\n"
17414 - " movntq %%mm7, 56(%1)\n"
17415 + "1: prefetch 320(%1)\n"
17416 + "2: movq (%1), %%mm0\n"
17417 + " movntq %%mm0, (%2)\n"
17418 + " movq 8(%1), %%mm1\n"
17419 + " movntq %%mm1, 8(%2)\n"
17420 + " movq 16(%1), %%mm2\n"
17421 + " movntq %%mm2, 16(%2)\n"
17422 + " movq 24(%1), %%mm3\n"
17423 + " movntq %%mm3, 24(%2)\n"
17424 + " movq 32(%1), %%mm4\n"
17425 + " movntq %%mm4, 32(%2)\n"
17426 + " movq 40(%1), %%mm5\n"
17427 + " movntq %%mm5, 40(%2)\n"
17428 + " movq 48(%1), %%mm6\n"
17429 + " movntq %%mm6, 48(%2)\n"
17430 + " movq 56(%1), %%mm7\n"
17431 + " movntq %%mm7, 56(%2)\n"
17432 ".section .fixup, \"ax\"\n"
17433 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17434 + "3:\n"
17435 +
17436 +#ifdef CONFIG_PAX_KERNEXEC
17437 + " movl %%cr0, %0\n"
17438 + " movl %0, %%eax\n"
17439 + " andl $0xFFFEFFFF, %%eax\n"
17440 + " movl %%eax, %%cr0\n"
17441 +#endif
17442 +
17443 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17444 +
17445 +#ifdef CONFIG_PAX_KERNEXEC
17446 + " movl %0, %%cr0\n"
17447 +#endif
17448 +
17449 " jmp 2b\n"
17450 ".previous\n"
17451 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17452 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17453
17454 from += 64;
17455 to += 64;
17456 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17457 static void fast_copy_page(void *to, void *from)
17458 {
17459 int i;
17460 + unsigned long cr0;
17461
17462 kernel_fpu_begin();
17463
17464 __asm__ __volatile__ (
17465 - "1: prefetch (%0)\n"
17466 - " prefetch 64(%0)\n"
17467 - " prefetch 128(%0)\n"
17468 - " prefetch 192(%0)\n"
17469 - " prefetch 256(%0)\n"
17470 + "1: prefetch (%1)\n"
17471 + " prefetch 64(%1)\n"
17472 + " prefetch 128(%1)\n"
17473 + " prefetch 192(%1)\n"
17474 + " prefetch 256(%1)\n"
17475 "2: \n"
17476 ".section .fixup, \"ax\"\n"
17477 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17478 + "3: \n"
17479 +
17480 +#ifdef CONFIG_PAX_KERNEXEC
17481 + " movl %%cr0, %0\n"
17482 + " movl %0, %%eax\n"
17483 + " andl $0xFFFEFFFF, %%eax\n"
17484 + " movl %%eax, %%cr0\n"
17485 +#endif
17486 +
17487 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17488 +
17489 +#ifdef CONFIG_PAX_KERNEXEC
17490 + " movl %0, %%cr0\n"
17491 +#endif
17492 +
17493 " jmp 2b\n"
17494 ".previous\n"
17495 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17496 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17497
17498 for (i = 0; i < 4096/64; i++) {
17499 __asm__ __volatile__ (
17500 - "1: prefetch 320(%0)\n"
17501 - "2: movq (%0), %%mm0\n"
17502 - " movq 8(%0), %%mm1\n"
17503 - " movq 16(%0), %%mm2\n"
17504 - " movq 24(%0), %%mm3\n"
17505 - " movq %%mm0, (%1)\n"
17506 - " movq %%mm1, 8(%1)\n"
17507 - " movq %%mm2, 16(%1)\n"
17508 - " movq %%mm3, 24(%1)\n"
17509 - " movq 32(%0), %%mm0\n"
17510 - " movq 40(%0), %%mm1\n"
17511 - " movq 48(%0), %%mm2\n"
17512 - " movq 56(%0), %%mm3\n"
17513 - " movq %%mm0, 32(%1)\n"
17514 - " movq %%mm1, 40(%1)\n"
17515 - " movq %%mm2, 48(%1)\n"
17516 - " movq %%mm3, 56(%1)\n"
17517 + "1: prefetch 320(%1)\n"
17518 + "2: movq (%1), %%mm0\n"
17519 + " movq 8(%1), %%mm1\n"
17520 + " movq 16(%1), %%mm2\n"
17521 + " movq 24(%1), %%mm3\n"
17522 + " movq %%mm0, (%2)\n"
17523 + " movq %%mm1, 8(%2)\n"
17524 + " movq %%mm2, 16(%2)\n"
17525 + " movq %%mm3, 24(%2)\n"
17526 + " movq 32(%1), %%mm0\n"
17527 + " movq 40(%1), %%mm1\n"
17528 + " movq 48(%1), %%mm2\n"
17529 + " movq 56(%1), %%mm3\n"
17530 + " movq %%mm0, 32(%2)\n"
17531 + " movq %%mm1, 40(%2)\n"
17532 + " movq %%mm2, 48(%2)\n"
17533 + " movq %%mm3, 56(%2)\n"
17534 ".section .fixup, \"ax\"\n"
17535 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17536 + "3:\n"
17537 +
17538 +#ifdef CONFIG_PAX_KERNEXEC
17539 + " movl %%cr0, %0\n"
17540 + " movl %0, %%eax\n"
17541 + " andl $0xFFFEFFFF, %%eax\n"
17542 + " movl %%eax, %%cr0\n"
17543 +#endif
17544 +
17545 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17546 +
17547 +#ifdef CONFIG_PAX_KERNEXEC
17548 + " movl %0, %%cr0\n"
17549 +#endif
17550 +
17551 " jmp 2b\n"
17552 ".previous\n"
17553 _ASM_EXTABLE(1b, 3b)
17554 - : : "r" (from), "r" (to) : "memory");
17555 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17556
17557 from += 64;
17558 to += 64;
17559 diff -urNp linux-3.0.4/arch/x86/lib/putuser.S linux-3.0.4/arch/x86/lib/putuser.S
17560 --- linux-3.0.4/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
17561 +++ linux-3.0.4/arch/x86/lib/putuser.S 2011-08-23 21:47:55.000000000 -0400
17562 @@ -15,7 +15,8 @@
17563 #include <asm/thread_info.h>
17564 #include <asm/errno.h>
17565 #include <asm/asm.h>
17566 -
17567 +#include <asm/segment.h>
17568 +#include <asm/pgtable.h>
17569
17570 /*
17571 * __put_user_X
17572 @@ -29,52 +30,119 @@
17573 * as they get called from within inline assembly.
17574 */
17575
17576 -#define ENTER CFI_STARTPROC ; \
17577 - GET_THREAD_INFO(%_ASM_BX)
17578 +#define ENTER CFI_STARTPROC
17579 #define EXIT ret ; \
17580 CFI_ENDPROC
17581
17582 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17583 +#define _DEST %_ASM_CX,%_ASM_BX
17584 +#else
17585 +#define _DEST %_ASM_CX
17586 +#endif
17587 +
17588 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17589 +#define __copyuser_seg gs;
17590 +#else
17591 +#define __copyuser_seg
17592 +#endif
17593 +
17594 .text
17595 ENTRY(__put_user_1)
17596 ENTER
17597 +
17598 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17599 + GET_THREAD_INFO(%_ASM_BX)
17600 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17601 jae bad_put_user
17602 -1: movb %al,(%_ASM_CX)
17603 +
17604 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17605 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17606 + cmp %_ASM_BX,%_ASM_CX
17607 + jb 1234f
17608 + xor %ebx,%ebx
17609 +1234:
17610 +#endif
17611 +
17612 +#endif
17613 +
17614 +1: __copyuser_seg movb %al,(_DEST)
17615 xor %eax,%eax
17616 EXIT
17617 ENDPROC(__put_user_1)
17618
17619 ENTRY(__put_user_2)
17620 ENTER
17621 +
17622 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17623 + GET_THREAD_INFO(%_ASM_BX)
17624 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17625 sub $1,%_ASM_BX
17626 cmp %_ASM_BX,%_ASM_CX
17627 jae bad_put_user
17628 -2: movw %ax,(%_ASM_CX)
17629 +
17630 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17631 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17632 + cmp %_ASM_BX,%_ASM_CX
17633 + jb 1234f
17634 + xor %ebx,%ebx
17635 +1234:
17636 +#endif
17637 +
17638 +#endif
17639 +
17640 +2: __copyuser_seg movw %ax,(_DEST)
17641 xor %eax,%eax
17642 EXIT
17643 ENDPROC(__put_user_2)
17644
17645 ENTRY(__put_user_4)
17646 ENTER
17647 +
17648 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17649 + GET_THREAD_INFO(%_ASM_BX)
17650 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17651 sub $3,%_ASM_BX
17652 cmp %_ASM_BX,%_ASM_CX
17653 jae bad_put_user
17654 -3: movl %eax,(%_ASM_CX)
17655 +
17656 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17657 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17658 + cmp %_ASM_BX,%_ASM_CX
17659 + jb 1234f
17660 + xor %ebx,%ebx
17661 +1234:
17662 +#endif
17663 +
17664 +#endif
17665 +
17666 +3: __copyuser_seg movl %eax,(_DEST)
17667 xor %eax,%eax
17668 EXIT
17669 ENDPROC(__put_user_4)
17670
17671 ENTRY(__put_user_8)
17672 ENTER
17673 +
17674 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17675 + GET_THREAD_INFO(%_ASM_BX)
17676 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17677 sub $7,%_ASM_BX
17678 cmp %_ASM_BX,%_ASM_CX
17679 jae bad_put_user
17680 -4: mov %_ASM_AX,(%_ASM_CX)
17681 +
17682 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17683 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17684 + cmp %_ASM_BX,%_ASM_CX
17685 + jb 1234f
17686 + xor %ebx,%ebx
17687 +1234:
17688 +#endif
17689 +
17690 +#endif
17691 +
17692 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
17693 #ifdef CONFIG_X86_32
17694 -5: movl %edx,4(%_ASM_CX)
17695 +5: __copyuser_seg movl %edx,4(_DEST)
17696 #endif
17697 xor %eax,%eax
17698 EXIT
17699 diff -urNp linux-3.0.4/arch/x86/lib/usercopy_32.c linux-3.0.4/arch/x86/lib/usercopy_32.c
17700 --- linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
17701 +++ linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
17702 @@ -43,7 +43,7 @@ do { \
17703 __asm__ __volatile__( \
17704 " testl %1,%1\n" \
17705 " jz 2f\n" \
17706 - "0: lodsb\n" \
17707 + "0: "__copyuser_seg"lodsb\n" \
17708 " stosb\n" \
17709 " testb %%al,%%al\n" \
17710 " jz 1f\n" \
17711 @@ -128,10 +128,12 @@ do { \
17712 int __d0; \
17713 might_fault(); \
17714 __asm__ __volatile__( \
17715 + __COPYUSER_SET_ES \
17716 "0: rep; stosl\n" \
17717 " movl %2,%0\n" \
17718 "1: rep; stosb\n" \
17719 "2:\n" \
17720 + __COPYUSER_RESTORE_ES \
17721 ".section .fixup,\"ax\"\n" \
17722 "3: lea 0(%2,%0,4),%0\n" \
17723 " jmp 2b\n" \
17724 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17725 might_fault();
17726
17727 __asm__ __volatile__(
17728 + __COPYUSER_SET_ES
17729 " testl %0, %0\n"
17730 " jz 3f\n"
17731 " andl %0,%%ecx\n"
17732 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17733 " subl %%ecx,%0\n"
17734 " addl %0,%%eax\n"
17735 "1:\n"
17736 + __COPYUSER_RESTORE_ES
17737 ".section .fixup,\"ax\"\n"
17738 "2: xorl %%eax,%%eax\n"
17739 " jmp 1b\n"
17740 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17741
17742 #ifdef CONFIG_X86_INTEL_USERCOPY
17743 static unsigned long
17744 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
17745 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17746 {
17747 int d0, d1;
17748 __asm__ __volatile__(
17749 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17750 " .align 2,0x90\n"
17751 "3: movl 0(%4), %%eax\n"
17752 "4: movl 4(%4), %%edx\n"
17753 - "5: movl %%eax, 0(%3)\n"
17754 - "6: movl %%edx, 4(%3)\n"
17755 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17756 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17757 "7: movl 8(%4), %%eax\n"
17758 "8: movl 12(%4),%%edx\n"
17759 - "9: movl %%eax, 8(%3)\n"
17760 - "10: movl %%edx, 12(%3)\n"
17761 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17762 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17763 "11: movl 16(%4), %%eax\n"
17764 "12: movl 20(%4), %%edx\n"
17765 - "13: movl %%eax, 16(%3)\n"
17766 - "14: movl %%edx, 20(%3)\n"
17767 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17768 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17769 "15: movl 24(%4), %%eax\n"
17770 "16: movl 28(%4), %%edx\n"
17771 - "17: movl %%eax, 24(%3)\n"
17772 - "18: movl %%edx, 28(%3)\n"
17773 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17774 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17775 "19: movl 32(%4), %%eax\n"
17776 "20: movl 36(%4), %%edx\n"
17777 - "21: movl %%eax, 32(%3)\n"
17778 - "22: movl %%edx, 36(%3)\n"
17779 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17780 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17781 "23: movl 40(%4), %%eax\n"
17782 "24: movl 44(%4), %%edx\n"
17783 - "25: movl %%eax, 40(%3)\n"
17784 - "26: movl %%edx, 44(%3)\n"
17785 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17786 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17787 "27: movl 48(%4), %%eax\n"
17788 "28: movl 52(%4), %%edx\n"
17789 - "29: movl %%eax, 48(%3)\n"
17790 - "30: movl %%edx, 52(%3)\n"
17791 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17792 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17793 "31: movl 56(%4), %%eax\n"
17794 "32: movl 60(%4), %%edx\n"
17795 - "33: movl %%eax, 56(%3)\n"
17796 - "34: movl %%edx, 60(%3)\n"
17797 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17798 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17799 " addl $-64, %0\n"
17800 " addl $64, %4\n"
17801 " addl $64, %3\n"
17802 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17803 " shrl $2, %0\n"
17804 " andl $3, %%eax\n"
17805 " cld\n"
17806 + __COPYUSER_SET_ES
17807 "99: rep; movsl\n"
17808 "36: movl %%eax, %0\n"
17809 "37: rep; movsb\n"
17810 "100:\n"
17811 + __COPYUSER_RESTORE_ES
17812 + ".section .fixup,\"ax\"\n"
17813 + "101: lea 0(%%eax,%0,4),%0\n"
17814 + " jmp 100b\n"
17815 + ".previous\n"
17816 + ".section __ex_table,\"a\"\n"
17817 + " .align 4\n"
17818 + " .long 1b,100b\n"
17819 + " .long 2b,100b\n"
17820 + " .long 3b,100b\n"
17821 + " .long 4b,100b\n"
17822 + " .long 5b,100b\n"
17823 + " .long 6b,100b\n"
17824 + " .long 7b,100b\n"
17825 + " .long 8b,100b\n"
17826 + " .long 9b,100b\n"
17827 + " .long 10b,100b\n"
17828 + " .long 11b,100b\n"
17829 + " .long 12b,100b\n"
17830 + " .long 13b,100b\n"
17831 + " .long 14b,100b\n"
17832 + " .long 15b,100b\n"
17833 + " .long 16b,100b\n"
17834 + " .long 17b,100b\n"
17835 + " .long 18b,100b\n"
17836 + " .long 19b,100b\n"
17837 + " .long 20b,100b\n"
17838 + " .long 21b,100b\n"
17839 + " .long 22b,100b\n"
17840 + " .long 23b,100b\n"
17841 + " .long 24b,100b\n"
17842 + " .long 25b,100b\n"
17843 + " .long 26b,100b\n"
17844 + " .long 27b,100b\n"
17845 + " .long 28b,100b\n"
17846 + " .long 29b,100b\n"
17847 + " .long 30b,100b\n"
17848 + " .long 31b,100b\n"
17849 + " .long 32b,100b\n"
17850 + " .long 33b,100b\n"
17851 + " .long 34b,100b\n"
17852 + " .long 35b,100b\n"
17853 + " .long 36b,100b\n"
17854 + " .long 37b,100b\n"
17855 + " .long 99b,101b\n"
17856 + ".previous"
17857 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
17858 + : "1"(to), "2"(from), "0"(size)
17859 + : "eax", "edx", "memory");
17860 + return size;
17861 +}
17862 +
17863 +static unsigned long
17864 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17865 +{
17866 + int d0, d1;
17867 + __asm__ __volatile__(
17868 + " .align 2,0x90\n"
17869 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17870 + " cmpl $67, %0\n"
17871 + " jbe 3f\n"
17872 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17873 + " .align 2,0x90\n"
17874 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17875 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17876 + "5: movl %%eax, 0(%3)\n"
17877 + "6: movl %%edx, 4(%3)\n"
17878 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17879 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17880 + "9: movl %%eax, 8(%3)\n"
17881 + "10: movl %%edx, 12(%3)\n"
17882 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17883 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17884 + "13: movl %%eax, 16(%3)\n"
17885 + "14: movl %%edx, 20(%3)\n"
17886 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17887 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17888 + "17: movl %%eax, 24(%3)\n"
17889 + "18: movl %%edx, 28(%3)\n"
17890 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17891 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17892 + "21: movl %%eax, 32(%3)\n"
17893 + "22: movl %%edx, 36(%3)\n"
17894 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17895 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17896 + "25: movl %%eax, 40(%3)\n"
17897 + "26: movl %%edx, 44(%3)\n"
17898 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17899 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17900 + "29: movl %%eax, 48(%3)\n"
17901 + "30: movl %%edx, 52(%3)\n"
17902 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17903 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17904 + "33: movl %%eax, 56(%3)\n"
17905 + "34: movl %%edx, 60(%3)\n"
17906 + " addl $-64, %0\n"
17907 + " addl $64, %4\n"
17908 + " addl $64, %3\n"
17909 + " cmpl $63, %0\n"
17910 + " ja 1b\n"
17911 + "35: movl %0, %%eax\n"
17912 + " shrl $2, %0\n"
17913 + " andl $3, %%eax\n"
17914 + " cld\n"
17915 + "99: rep; "__copyuser_seg" movsl\n"
17916 + "36: movl %%eax, %0\n"
17917 + "37: rep; "__copyuser_seg" movsb\n"
17918 + "100:\n"
17919 ".section .fixup,\"ax\"\n"
17920 "101: lea 0(%%eax,%0,4),%0\n"
17921 " jmp 100b\n"
17922 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17923 int d0, d1;
17924 __asm__ __volatile__(
17925 " .align 2,0x90\n"
17926 - "0: movl 32(%4), %%eax\n"
17927 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17928 " cmpl $67, %0\n"
17929 " jbe 2f\n"
17930 - "1: movl 64(%4), %%eax\n"
17931 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17932 " .align 2,0x90\n"
17933 - "2: movl 0(%4), %%eax\n"
17934 - "21: movl 4(%4), %%edx\n"
17935 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17936 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17937 " movl %%eax, 0(%3)\n"
17938 " movl %%edx, 4(%3)\n"
17939 - "3: movl 8(%4), %%eax\n"
17940 - "31: movl 12(%4),%%edx\n"
17941 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
17942 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
17943 " movl %%eax, 8(%3)\n"
17944 " movl %%edx, 12(%3)\n"
17945 - "4: movl 16(%4), %%eax\n"
17946 - "41: movl 20(%4), %%edx\n"
17947 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
17948 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
17949 " movl %%eax, 16(%3)\n"
17950 " movl %%edx, 20(%3)\n"
17951 - "10: movl 24(%4), %%eax\n"
17952 - "51: movl 28(%4), %%edx\n"
17953 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
17954 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
17955 " movl %%eax, 24(%3)\n"
17956 " movl %%edx, 28(%3)\n"
17957 - "11: movl 32(%4), %%eax\n"
17958 - "61: movl 36(%4), %%edx\n"
17959 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
17960 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
17961 " movl %%eax, 32(%3)\n"
17962 " movl %%edx, 36(%3)\n"
17963 - "12: movl 40(%4), %%eax\n"
17964 - "71: movl 44(%4), %%edx\n"
17965 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
17966 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
17967 " movl %%eax, 40(%3)\n"
17968 " movl %%edx, 44(%3)\n"
17969 - "13: movl 48(%4), %%eax\n"
17970 - "81: movl 52(%4), %%edx\n"
17971 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
17972 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
17973 " movl %%eax, 48(%3)\n"
17974 " movl %%edx, 52(%3)\n"
17975 - "14: movl 56(%4), %%eax\n"
17976 - "91: movl 60(%4), %%edx\n"
17977 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
17978 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
17979 " movl %%eax, 56(%3)\n"
17980 " movl %%edx, 60(%3)\n"
17981 " addl $-64, %0\n"
17982 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
17983 " shrl $2, %0\n"
17984 " andl $3, %%eax\n"
17985 " cld\n"
17986 - "6: rep; movsl\n"
17987 + "6: rep; "__copyuser_seg" movsl\n"
17988 " movl %%eax,%0\n"
17989 - "7: rep; movsb\n"
17990 + "7: rep; "__copyuser_seg" movsb\n"
17991 "8:\n"
17992 ".section .fixup,\"ax\"\n"
17993 "9: lea 0(%%eax,%0,4),%0\n"
17994 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
17995
17996 __asm__ __volatile__(
17997 " .align 2,0x90\n"
17998 - "0: movl 32(%4), %%eax\n"
17999 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18000 " cmpl $67, %0\n"
18001 " jbe 2f\n"
18002 - "1: movl 64(%4), %%eax\n"
18003 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18004 " .align 2,0x90\n"
18005 - "2: movl 0(%4), %%eax\n"
18006 - "21: movl 4(%4), %%edx\n"
18007 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18008 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18009 " movnti %%eax, 0(%3)\n"
18010 " movnti %%edx, 4(%3)\n"
18011 - "3: movl 8(%4), %%eax\n"
18012 - "31: movl 12(%4),%%edx\n"
18013 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18014 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18015 " movnti %%eax, 8(%3)\n"
18016 " movnti %%edx, 12(%3)\n"
18017 - "4: movl 16(%4), %%eax\n"
18018 - "41: movl 20(%4), %%edx\n"
18019 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18020 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18021 " movnti %%eax, 16(%3)\n"
18022 " movnti %%edx, 20(%3)\n"
18023 - "10: movl 24(%4), %%eax\n"
18024 - "51: movl 28(%4), %%edx\n"
18025 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18026 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18027 " movnti %%eax, 24(%3)\n"
18028 " movnti %%edx, 28(%3)\n"
18029 - "11: movl 32(%4), %%eax\n"
18030 - "61: movl 36(%4), %%edx\n"
18031 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18032 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18033 " movnti %%eax, 32(%3)\n"
18034 " movnti %%edx, 36(%3)\n"
18035 - "12: movl 40(%4), %%eax\n"
18036 - "71: movl 44(%4), %%edx\n"
18037 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18038 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18039 " movnti %%eax, 40(%3)\n"
18040 " movnti %%edx, 44(%3)\n"
18041 - "13: movl 48(%4), %%eax\n"
18042 - "81: movl 52(%4), %%edx\n"
18043 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18044 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18045 " movnti %%eax, 48(%3)\n"
18046 " movnti %%edx, 52(%3)\n"
18047 - "14: movl 56(%4), %%eax\n"
18048 - "91: movl 60(%4), %%edx\n"
18049 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18050 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18051 " movnti %%eax, 56(%3)\n"
18052 " movnti %%edx, 60(%3)\n"
18053 " addl $-64, %0\n"
18054 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18055 " shrl $2, %0\n"
18056 " andl $3, %%eax\n"
18057 " cld\n"
18058 - "6: rep; movsl\n"
18059 + "6: rep; "__copyuser_seg" movsl\n"
18060 " movl %%eax,%0\n"
18061 - "7: rep; movsb\n"
18062 + "7: rep; "__copyuser_seg" movsb\n"
18063 "8:\n"
18064 ".section .fixup,\"ax\"\n"
18065 "9: lea 0(%%eax,%0,4),%0\n"
18066 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18067
18068 __asm__ __volatile__(
18069 " .align 2,0x90\n"
18070 - "0: movl 32(%4), %%eax\n"
18071 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18072 " cmpl $67, %0\n"
18073 " jbe 2f\n"
18074 - "1: movl 64(%4), %%eax\n"
18075 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18076 " .align 2,0x90\n"
18077 - "2: movl 0(%4), %%eax\n"
18078 - "21: movl 4(%4), %%edx\n"
18079 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18080 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18081 " movnti %%eax, 0(%3)\n"
18082 " movnti %%edx, 4(%3)\n"
18083 - "3: movl 8(%4), %%eax\n"
18084 - "31: movl 12(%4),%%edx\n"
18085 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18086 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18087 " movnti %%eax, 8(%3)\n"
18088 " movnti %%edx, 12(%3)\n"
18089 - "4: movl 16(%4), %%eax\n"
18090 - "41: movl 20(%4), %%edx\n"
18091 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18092 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18093 " movnti %%eax, 16(%3)\n"
18094 " movnti %%edx, 20(%3)\n"
18095 - "10: movl 24(%4), %%eax\n"
18096 - "51: movl 28(%4), %%edx\n"
18097 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18098 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18099 " movnti %%eax, 24(%3)\n"
18100 " movnti %%edx, 28(%3)\n"
18101 - "11: movl 32(%4), %%eax\n"
18102 - "61: movl 36(%4), %%edx\n"
18103 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18104 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18105 " movnti %%eax, 32(%3)\n"
18106 " movnti %%edx, 36(%3)\n"
18107 - "12: movl 40(%4), %%eax\n"
18108 - "71: movl 44(%4), %%edx\n"
18109 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18110 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18111 " movnti %%eax, 40(%3)\n"
18112 " movnti %%edx, 44(%3)\n"
18113 - "13: movl 48(%4), %%eax\n"
18114 - "81: movl 52(%4), %%edx\n"
18115 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18116 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18117 " movnti %%eax, 48(%3)\n"
18118 " movnti %%edx, 52(%3)\n"
18119 - "14: movl 56(%4), %%eax\n"
18120 - "91: movl 60(%4), %%edx\n"
18121 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18122 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18123 " movnti %%eax, 56(%3)\n"
18124 " movnti %%edx, 60(%3)\n"
18125 " addl $-64, %0\n"
18126 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18127 " shrl $2, %0\n"
18128 " andl $3, %%eax\n"
18129 " cld\n"
18130 - "6: rep; movsl\n"
18131 + "6: rep; "__copyuser_seg" movsl\n"
18132 " movl %%eax,%0\n"
18133 - "7: rep; movsb\n"
18134 + "7: rep; "__copyuser_seg" movsb\n"
18135 "8:\n"
18136 ".section .fixup,\"ax\"\n"
18137 "9: lea 0(%%eax,%0,4),%0\n"
18138 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18139 */
18140 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18141 unsigned long size);
18142 -unsigned long __copy_user_intel(void __user *to, const void *from,
18143 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18144 + unsigned long size);
18145 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18146 unsigned long size);
18147 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18148 const void __user *from, unsigned long size);
18149 #endif /* CONFIG_X86_INTEL_USERCOPY */
18150
18151 /* Generic arbitrary sized copy. */
18152 -#define __copy_user(to, from, size) \
18153 +#define __copy_user(to, from, size, prefix, set, restore) \
18154 do { \
18155 int __d0, __d1, __d2; \
18156 __asm__ __volatile__( \
18157 + set \
18158 " cmp $7,%0\n" \
18159 " jbe 1f\n" \
18160 " movl %1,%0\n" \
18161 " negl %0\n" \
18162 " andl $7,%0\n" \
18163 " subl %0,%3\n" \
18164 - "4: rep; movsb\n" \
18165 + "4: rep; "prefix"movsb\n" \
18166 " movl %3,%0\n" \
18167 " shrl $2,%0\n" \
18168 " andl $3,%3\n" \
18169 " .align 2,0x90\n" \
18170 - "0: rep; movsl\n" \
18171 + "0: rep; "prefix"movsl\n" \
18172 " movl %3,%0\n" \
18173 - "1: rep; movsb\n" \
18174 + "1: rep; "prefix"movsb\n" \
18175 "2:\n" \
18176 + restore \
18177 ".section .fixup,\"ax\"\n" \
18178 "5: addl %3,%0\n" \
18179 " jmp 2b\n" \
18180 @@ -682,14 +799,14 @@ do { \
18181 " negl %0\n" \
18182 " andl $7,%0\n" \
18183 " subl %0,%3\n" \
18184 - "4: rep; movsb\n" \
18185 + "4: rep; "__copyuser_seg"movsb\n" \
18186 " movl %3,%0\n" \
18187 " shrl $2,%0\n" \
18188 " andl $3,%3\n" \
18189 " .align 2,0x90\n" \
18190 - "0: rep; movsl\n" \
18191 + "0: rep; "__copyuser_seg"movsl\n" \
18192 " movl %3,%0\n" \
18193 - "1: rep; movsb\n" \
18194 + "1: rep; "__copyuser_seg"movsb\n" \
18195 "2:\n" \
18196 ".section .fixup,\"ax\"\n" \
18197 "5: addl %3,%0\n" \
18198 @@ -775,9 +892,9 @@ survive:
18199 }
18200 #endif
18201 if (movsl_is_ok(to, from, n))
18202 - __copy_user(to, from, n);
18203 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18204 else
18205 - n = __copy_user_intel(to, from, n);
18206 + n = __generic_copy_to_user_intel(to, from, n);
18207 return n;
18208 }
18209 EXPORT_SYMBOL(__copy_to_user_ll);
18210 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18211 unsigned long n)
18212 {
18213 if (movsl_is_ok(to, from, n))
18214 - __copy_user(to, from, n);
18215 + __copy_user(to, from, n, __copyuser_seg, "", "");
18216 else
18217 - n = __copy_user_intel((void __user *)to,
18218 - (const void *)from, n);
18219 + n = __generic_copy_from_user_intel(to, from, n);
18220 return n;
18221 }
18222 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18223 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18224 if (n > 64 && cpu_has_xmm2)
18225 n = __copy_user_intel_nocache(to, from, n);
18226 else
18227 - __copy_user(to, from, n);
18228 + __copy_user(to, from, n, __copyuser_seg, "", "");
18229 #else
18230 - __copy_user(to, from, n);
18231 + __copy_user(to, from, n, __copyuser_seg, "", "");
18232 #endif
18233 return n;
18234 }
18235 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18236
18237 -/**
18238 - * copy_to_user: - Copy a block of data into user space.
18239 - * @to: Destination address, in user space.
18240 - * @from: Source address, in kernel space.
18241 - * @n: Number of bytes to copy.
18242 - *
18243 - * Context: User context only. This function may sleep.
18244 - *
18245 - * Copy data from kernel space to user space.
18246 - *
18247 - * Returns number of bytes that could not be copied.
18248 - * On success, this will be zero.
18249 - */
18250 -unsigned long
18251 -copy_to_user(void __user *to, const void *from, unsigned long n)
18252 +void copy_from_user_overflow(void)
18253 {
18254 - if (access_ok(VERIFY_WRITE, to, n))
18255 - n = __copy_to_user(to, from, n);
18256 - return n;
18257 + WARN(1, "Buffer overflow detected!\n");
18258 }
18259 -EXPORT_SYMBOL(copy_to_user);
18260 +EXPORT_SYMBOL(copy_from_user_overflow);
18261
18262 -/**
18263 - * copy_from_user: - Copy a block of data from user space.
18264 - * @to: Destination address, in kernel space.
18265 - * @from: Source address, in user space.
18266 - * @n: Number of bytes to copy.
18267 - *
18268 - * Context: User context only. This function may sleep.
18269 - *
18270 - * Copy data from user space to kernel space.
18271 - *
18272 - * Returns number of bytes that could not be copied.
18273 - * On success, this will be zero.
18274 - *
18275 - * If some data could not be copied, this function will pad the copied
18276 - * data to the requested size using zero bytes.
18277 - */
18278 -unsigned long
18279 -_copy_from_user(void *to, const void __user *from, unsigned long n)
18280 +void copy_to_user_overflow(void)
18281 {
18282 - if (access_ok(VERIFY_READ, from, n))
18283 - n = __copy_from_user(to, from, n);
18284 - else
18285 - memset(to, 0, n);
18286 - return n;
18287 + WARN(1, "Buffer overflow detected!\n");
18288 }
18289 -EXPORT_SYMBOL(_copy_from_user);
18290 +EXPORT_SYMBOL(copy_to_user_overflow);
18291
18292 -void copy_from_user_overflow(void)
18293 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18294 +void __set_fs(mm_segment_t x)
18295 {
18296 - WARN(1, "Buffer overflow detected!\n");
18297 + switch (x.seg) {
18298 + case 0:
18299 + loadsegment(gs, 0);
18300 + break;
18301 + case TASK_SIZE_MAX:
18302 + loadsegment(gs, __USER_DS);
18303 + break;
18304 + case -1UL:
18305 + loadsegment(gs, __KERNEL_DS);
18306 + break;
18307 + default:
18308 + BUG();
18309 + }
18310 + return;
18311 }
18312 -EXPORT_SYMBOL(copy_from_user_overflow);
18313 +EXPORT_SYMBOL(__set_fs);
18314 +
18315 +void set_fs(mm_segment_t x)
18316 +{
18317 + current_thread_info()->addr_limit = x;
18318 + __set_fs(x);
18319 +}
18320 +EXPORT_SYMBOL(set_fs);
18321 +#endif
18322 diff -urNp linux-3.0.4/arch/x86/lib/usercopy_64.c linux-3.0.4/arch/x86/lib/usercopy_64.c
18323 --- linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
18324 +++ linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
18325 @@ -42,6 +42,12 @@ long
18326 __strncpy_from_user(char *dst, const char __user *src, long count)
18327 {
18328 long res;
18329 +
18330 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18331 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18332 + src += PAX_USER_SHADOW_BASE;
18333 +#endif
18334 +
18335 __do_strncpy_from_user(dst, src, count, res);
18336 return res;
18337 }
18338 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18339 {
18340 long __d0;
18341 might_fault();
18342 +
18343 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18344 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18345 + addr += PAX_USER_SHADOW_BASE;
18346 +#endif
18347 +
18348 /* no memory constraint because it doesn't change any memory gcc knows
18349 about */
18350 asm volatile(
18351 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18352
18353 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18354 {
18355 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18356 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18357 +
18358 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18359 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18360 + to += PAX_USER_SHADOW_BASE;
18361 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18362 + from += PAX_USER_SHADOW_BASE;
18363 +#endif
18364 +
18365 return copy_user_generic((__force void *)to, (__force void *)from, len);
18366 - }
18367 - return len;
18368 + }
18369 + return len;
18370 }
18371 EXPORT_SYMBOL(copy_in_user);
18372
18373 diff -urNp linux-3.0.4/arch/x86/Makefile linux-3.0.4/arch/x86/Makefile
18374 --- linux-3.0.4/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
18375 +++ linux-3.0.4/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
18376 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18377 else
18378 BITS := 64
18379 UTS_MACHINE := x86_64
18380 + biarch := $(call cc-option,-m64)
18381 CHECKFLAGS += -D__x86_64__ -m64
18382
18383 KBUILD_AFLAGS += -m64
18384 @@ -195,3 +196,12 @@ define archhelp
18385 echo ' FDARGS="..." arguments for the booted kernel'
18386 echo ' FDINITRD=file initrd for the booted kernel'
18387 endef
18388 +
18389 +define OLD_LD
18390 +
18391 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18392 +*** Please upgrade your binutils to 2.18 or newer
18393 +endef
18394 +
18395 +archprepare:
18396 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18397 diff -urNp linux-3.0.4/arch/x86/mm/extable.c linux-3.0.4/arch/x86/mm/extable.c
18398 --- linux-3.0.4/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
18399 +++ linux-3.0.4/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
18400 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
18401 const struct exception_table_entry *fixup;
18402
18403 #ifdef CONFIG_PNPBIOS
18404 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18405 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18406 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18407 extern u32 pnp_bios_is_utter_crap;
18408 pnp_bios_is_utter_crap = 1;
18409 diff -urNp linux-3.0.4/arch/x86/mm/fault.c linux-3.0.4/arch/x86/mm/fault.c
18410 --- linux-3.0.4/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
18411 +++ linux-3.0.4/arch/x86/mm/fault.c 2011-08-23 21:48:14.000000000 -0400
18412 @@ -13,10 +13,18 @@
18413 #include <linux/perf_event.h> /* perf_sw_event */
18414 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18415 #include <linux/prefetch.h> /* prefetchw */
18416 +#include <linux/unistd.h>
18417 +#include <linux/compiler.h>
18418
18419 #include <asm/traps.h> /* dotraplinkage, ... */
18420 #include <asm/pgalloc.h> /* pgd_*(), ... */
18421 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18422 +#include <asm/vsyscall.h>
18423 +#include <asm/tlbflush.h>
18424 +
18425 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18426 +#include <asm/stacktrace.h>
18427 +#endif
18428
18429 /*
18430 * Page fault error code bits:
18431 @@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
18432 int ret = 0;
18433
18434 /* kprobe_running() needs smp_processor_id() */
18435 - if (kprobes_built_in() && !user_mode_vm(regs)) {
18436 + if (kprobes_built_in() && !user_mode(regs)) {
18437 preempt_disable();
18438 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18439 ret = 1;
18440 @@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
18441 return !instr_lo || (instr_lo>>1) == 1;
18442 case 0x00:
18443 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18444 - if (probe_kernel_address(instr, opcode))
18445 + if (user_mode(regs)) {
18446 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18447 + return 0;
18448 + } else if (probe_kernel_address(instr, opcode))
18449 return 0;
18450
18451 *prefetch = (instr_lo == 0xF) &&
18452 @@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
18453 while (instr < max_instr) {
18454 unsigned char opcode;
18455
18456 - if (probe_kernel_address(instr, opcode))
18457 + if (user_mode(regs)) {
18458 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18459 + break;
18460 + } else if (probe_kernel_address(instr, opcode))
18461 break;
18462
18463 instr++;
18464 @@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
18465 force_sig_info(si_signo, &info, tsk);
18466 }
18467
18468 +#ifdef CONFIG_PAX_EMUTRAMP
18469 +static int pax_handle_fetch_fault(struct pt_regs *regs);
18470 +#endif
18471 +
18472 +#ifdef CONFIG_PAX_PAGEEXEC
18473 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18474 +{
18475 + pgd_t *pgd;
18476 + pud_t *pud;
18477 + pmd_t *pmd;
18478 +
18479 + pgd = pgd_offset(mm, address);
18480 + if (!pgd_present(*pgd))
18481 + return NULL;
18482 + pud = pud_offset(pgd, address);
18483 + if (!pud_present(*pud))
18484 + return NULL;
18485 + pmd = pmd_offset(pud, address);
18486 + if (!pmd_present(*pmd))
18487 + return NULL;
18488 + return pmd;
18489 +}
18490 +#endif
18491 +
18492 DEFINE_SPINLOCK(pgd_lock);
18493 LIST_HEAD(pgd_list);
18494
18495 @@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
18496 for (address = VMALLOC_START & PMD_MASK;
18497 address >= TASK_SIZE && address < FIXADDR_TOP;
18498 address += PMD_SIZE) {
18499 +
18500 +#ifdef CONFIG_PAX_PER_CPU_PGD
18501 + unsigned long cpu;
18502 +#else
18503 struct page *page;
18504 +#endif
18505
18506 spin_lock(&pgd_lock);
18507 +
18508 +#ifdef CONFIG_PAX_PER_CPU_PGD
18509 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18510 + pgd_t *pgd = get_cpu_pgd(cpu);
18511 + pmd_t *ret;
18512 +#else
18513 list_for_each_entry(page, &pgd_list, lru) {
18514 + pgd_t *pgd = page_address(page);
18515 spinlock_t *pgt_lock;
18516 pmd_t *ret;
18517
18518 @@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
18519 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18520
18521 spin_lock(pgt_lock);
18522 - ret = vmalloc_sync_one(page_address(page), address);
18523 +#endif
18524 +
18525 + ret = vmalloc_sync_one(pgd, address);
18526 +
18527 +#ifndef CONFIG_PAX_PER_CPU_PGD
18528 spin_unlock(pgt_lock);
18529 +#endif
18530
18531 if (!ret)
18532 break;
18533 @@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
18534 * an interrupt in the middle of a task switch..
18535 */
18536 pgd_paddr = read_cr3();
18537 +
18538 +#ifdef CONFIG_PAX_PER_CPU_PGD
18539 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18540 +#endif
18541 +
18542 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18543 if (!pmd_k)
18544 return -1;
18545 @@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
18546 * happen within a race in page table update. In the later
18547 * case just flush:
18548 */
18549 +
18550 +#ifdef CONFIG_PAX_PER_CPU_PGD
18551 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18552 + pgd = pgd_offset_cpu(smp_processor_id(), address);
18553 +#else
18554 pgd = pgd_offset(current->active_mm, address);
18555 +#endif
18556 +
18557 pgd_ref = pgd_offset_k(address);
18558 if (pgd_none(*pgd_ref))
18559 return -1;
18560 @@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
18561 static int is_errata100(struct pt_regs *regs, unsigned long address)
18562 {
18563 #ifdef CONFIG_X86_64
18564 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18565 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18566 return 1;
18567 #endif
18568 return 0;
18569 @@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
18570 }
18571
18572 static const char nx_warning[] = KERN_CRIT
18573 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18574 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18575
18576 static void
18577 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18578 @@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
18579 if (!oops_may_print())
18580 return;
18581
18582 - if (error_code & PF_INSTR) {
18583 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18584 unsigned int level;
18585
18586 pte_t *pte = lookup_address(address, &level);
18587
18588 if (pte && pte_present(*pte) && !pte_exec(*pte))
18589 - printk(nx_warning, current_uid());
18590 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18591 + }
18592 +
18593 +#ifdef CONFIG_PAX_KERNEXEC
18594 + if (init_mm.start_code <= address && address < init_mm.end_code) {
18595 + if (current->signal->curr_ip)
18596 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18597 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18598 + else
18599 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18600 + current->comm, task_pid_nr(current), current_uid(), current_euid());
18601 }
18602 +#endif
18603
18604 printk(KERN_ALERT "BUG: unable to handle kernel ");
18605 if (address < PAGE_SIZE)
18606 @@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
18607 unsigned long address, int si_code)
18608 {
18609 struct task_struct *tsk = current;
18610 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18611 + struct mm_struct *mm = tsk->mm;
18612 +#endif
18613 +
18614 +#ifdef CONFIG_X86_64
18615 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18616 + if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
18617 + regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
18618 + regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
18619 + regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
18620 + return;
18621 + }
18622 + }
18623 +#endif
18624 +
18625 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18626 + if (mm && (error_code & PF_USER)) {
18627 + unsigned long ip = regs->ip;
18628 +
18629 + if (v8086_mode(regs))
18630 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18631 +
18632 + /*
18633 + * It's possible to have interrupts off here:
18634 + */
18635 + local_irq_enable();
18636 +
18637 +#ifdef CONFIG_PAX_PAGEEXEC
18638 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18639 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18640 +
18641 +#ifdef CONFIG_PAX_EMUTRAMP
18642 + switch (pax_handle_fetch_fault(regs)) {
18643 + case 2:
18644 + return;
18645 + }
18646 +#endif
18647 +
18648 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18649 + do_group_exit(SIGKILL);
18650 + }
18651 +#endif
18652 +
18653 +#ifdef CONFIG_PAX_SEGMEXEC
18654 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18655 +
18656 +#ifdef CONFIG_PAX_EMUTRAMP
18657 + switch (pax_handle_fetch_fault(regs)) {
18658 + case 2:
18659 + return;
18660 + }
18661 +#endif
18662 +
18663 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18664 + do_group_exit(SIGKILL);
18665 + }
18666 +#endif
18667 +
18668 + }
18669 +#endif
18670
18671 /* User mode accesses just cause a SIGSEGV */
18672 if (error_code & PF_USER) {
18673 @@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
18674 return 1;
18675 }
18676
18677 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18678 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18679 +{
18680 + pte_t *pte;
18681 + pmd_t *pmd;
18682 + spinlock_t *ptl;
18683 + unsigned char pte_mask;
18684 +
18685 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18686 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
18687 + return 0;
18688 +
18689 + /* PaX: it's our fault, let's handle it if we can */
18690 +
18691 + /* PaX: take a look at read faults before acquiring any locks */
18692 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18693 + /* instruction fetch attempt from a protected page in user mode */
18694 + up_read(&mm->mmap_sem);
18695 +
18696 +#ifdef CONFIG_PAX_EMUTRAMP
18697 + switch (pax_handle_fetch_fault(regs)) {
18698 + case 2:
18699 + return 1;
18700 + }
18701 +#endif
18702 +
18703 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18704 + do_group_exit(SIGKILL);
18705 + }
18706 +
18707 + pmd = pax_get_pmd(mm, address);
18708 + if (unlikely(!pmd))
18709 + return 0;
18710 +
18711 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18712 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18713 + pte_unmap_unlock(pte, ptl);
18714 + return 0;
18715 + }
18716 +
18717 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18718 + /* write attempt to a protected page in user mode */
18719 + pte_unmap_unlock(pte, ptl);
18720 + return 0;
18721 + }
18722 +
18723 +#ifdef CONFIG_SMP
18724 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18725 +#else
18726 + if (likely(address > get_limit(regs->cs)))
18727 +#endif
18728 + {
18729 + set_pte(pte, pte_mkread(*pte));
18730 + __flush_tlb_one(address);
18731 + pte_unmap_unlock(pte, ptl);
18732 + up_read(&mm->mmap_sem);
18733 + return 1;
18734 + }
18735 +
18736 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18737 +
18738 + /*
18739 + * PaX: fill DTLB with user rights and retry
18740 + */
18741 + __asm__ __volatile__ (
18742 + "orb %2,(%1)\n"
18743 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18744 +/*
18745 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18746 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18747 + * page fault when examined during a TLB load attempt. this is true not only
18748 + * for PTEs holding a non-present entry but also present entries that will
18749 + * raise a page fault (such as those set up by PaX, or the copy-on-write
18750 + * mechanism). in effect it means that we do *not* need to flush the TLBs
18751 + * for our target pages since their PTEs are simply not in the TLBs at all.
18752 +
18753 + * the best thing in omitting it is that we gain around 15-20% speed in the
18754 + * fast path of the page fault handler and can get rid of tracing since we
18755 + * can no longer flush unintended entries.
18756 + */
18757 + "invlpg (%0)\n"
18758 +#endif
18759 + __copyuser_seg"testb $0,(%0)\n"
18760 + "xorb %3,(%1)\n"
18761 + :
18762 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18763 + : "memory", "cc");
18764 + pte_unmap_unlock(pte, ptl);
18765 + up_read(&mm->mmap_sem);
18766 + return 1;
18767 +}
18768 +#endif
18769 +
18770 /*
18771 * Handle a spurious fault caused by a stale TLB entry.
18772 *
18773 @@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
18774 static inline int
18775 access_error(unsigned long error_code, struct vm_area_struct *vma)
18776 {
18777 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18778 + return 1;
18779 +
18780 if (error_code & PF_WRITE) {
18781 /* write, present and write, not present: */
18782 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18783 @@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
18784 {
18785 struct vm_area_struct *vma;
18786 struct task_struct *tsk;
18787 - unsigned long address;
18788 struct mm_struct *mm;
18789 int fault;
18790 int write = error_code & PF_WRITE;
18791 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
18792 (write ? FAULT_FLAG_WRITE : 0);
18793
18794 + /* Get the faulting address: */
18795 + unsigned long address = read_cr2();
18796 +
18797 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18798 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18799 + if (!search_exception_tables(regs->ip)) {
18800 + bad_area_nosemaphore(regs, error_code, address);
18801 + return;
18802 + }
18803 + if (address < PAX_USER_SHADOW_BASE) {
18804 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18805 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18806 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18807 + } else
18808 + address -= PAX_USER_SHADOW_BASE;
18809 + }
18810 +#endif
18811 +
18812 tsk = current;
18813 mm = tsk->mm;
18814
18815 - /* Get the faulting address: */
18816 - address = read_cr2();
18817 -
18818 /*
18819 * Detect and handle instructions that would cause a page fault for
18820 * both a tracked kernel page and a userspace page.
18821 @@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
18822 * User-mode registers count as a user access even for any
18823 * potential system fault or CPU buglet:
18824 */
18825 - if (user_mode_vm(regs)) {
18826 + if (user_mode(regs)) {
18827 local_irq_enable();
18828 error_code |= PF_USER;
18829 } else {
18830 @@ -1103,6 +1351,11 @@ retry:
18831 might_sleep();
18832 }
18833
18834 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18835 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18836 + return;
18837 +#endif
18838 +
18839 vma = find_vma(mm, address);
18840 if (unlikely(!vma)) {
18841 bad_area(regs, error_code, address);
18842 @@ -1114,18 +1367,24 @@ retry:
18843 bad_area(regs, error_code, address);
18844 return;
18845 }
18846 - if (error_code & PF_USER) {
18847 - /*
18848 - * Accessing the stack below %sp is always a bug.
18849 - * The large cushion allows instructions like enter
18850 - * and pusha to work. ("enter $65535, $31" pushes
18851 - * 32 pointers and then decrements %sp by 65535.)
18852 - */
18853 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18854 - bad_area(regs, error_code, address);
18855 - return;
18856 - }
18857 + /*
18858 + * Accessing the stack below %sp is always a bug.
18859 + * The large cushion allows instructions like enter
18860 + * and pusha to work. ("enter $65535, $31" pushes
18861 + * 32 pointers and then decrements %sp by 65535.)
18862 + */
18863 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18864 + bad_area(regs, error_code, address);
18865 + return;
18866 }
18867 +
18868 +#ifdef CONFIG_PAX_SEGMEXEC
18869 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18870 + bad_area(regs, error_code, address);
18871 + return;
18872 + }
18873 +#endif
18874 +
18875 if (unlikely(expand_stack(vma, address))) {
18876 bad_area(regs, error_code, address);
18877 return;
18878 @@ -1180,3 +1439,199 @@ good_area:
18879
18880 up_read(&mm->mmap_sem);
18881 }
18882 +
18883 +#ifdef CONFIG_PAX_EMUTRAMP
18884 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
18885 +{
18886 + int err;
18887 +
18888 + do { /* PaX: gcc trampoline emulation #1 */
18889 + unsigned char mov1, mov2;
18890 + unsigned short jmp;
18891 + unsigned int addr1, addr2;
18892 +
18893 +#ifdef CONFIG_X86_64
18894 + if ((regs->ip + 11) >> 32)
18895 + break;
18896 +#endif
18897 +
18898 + err = get_user(mov1, (unsigned char __user *)regs->ip);
18899 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18900 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
18901 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18902 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
18903 +
18904 + if (err)
18905 + break;
18906 +
18907 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
18908 + regs->cx = addr1;
18909 + regs->ax = addr2;
18910 + regs->ip = addr2;
18911 + return 2;
18912 + }
18913 + } while (0);
18914 +
18915 + do { /* PaX: gcc trampoline emulation #2 */
18916 + unsigned char mov, jmp;
18917 + unsigned int addr1, addr2;
18918 +
18919 +#ifdef CONFIG_X86_64
18920 + if ((regs->ip + 9) >> 32)
18921 + break;
18922 +#endif
18923 +
18924 + err = get_user(mov, (unsigned char __user *)regs->ip);
18925 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18926 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
18927 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18928 +
18929 + if (err)
18930 + break;
18931 +
18932 + if (mov == 0xB9 && jmp == 0xE9) {
18933 + regs->cx = addr1;
18934 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
18935 + return 2;
18936 + }
18937 + } while (0);
18938 +
18939 + return 1; /* PaX in action */
18940 +}
18941 +
18942 +#ifdef CONFIG_X86_64
18943 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
18944 +{
18945 + int err;
18946 +
18947 + do { /* PaX: gcc trampoline emulation #1 */
18948 + unsigned short mov1, mov2, jmp1;
18949 + unsigned char jmp2;
18950 + unsigned int addr1;
18951 + unsigned long addr2;
18952 +
18953 + err = get_user(mov1, (unsigned short __user *)regs->ip);
18954 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
18955 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
18956 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
18957 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
18958 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
18959 +
18960 + if (err)
18961 + break;
18962 +
18963 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18964 + regs->r11 = addr1;
18965 + regs->r10 = addr2;
18966 + regs->ip = addr1;
18967 + return 2;
18968 + }
18969 + } while (0);
18970 +
18971 + do { /* PaX: gcc trampoline emulation #2 */
18972 + unsigned short mov1, mov2, jmp1;
18973 + unsigned char jmp2;
18974 + unsigned long addr1, addr2;
18975 +
18976 + err = get_user(mov1, (unsigned short __user *)regs->ip);
18977 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
18978 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
18979 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
18980 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
18981 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
18982 +
18983 + if (err)
18984 + break;
18985 +
18986 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18987 + regs->r11 = addr1;
18988 + regs->r10 = addr2;
18989 + regs->ip = addr1;
18990 + return 2;
18991 + }
18992 + } while (0);
18993 +
18994 + return 1; /* PaX in action */
18995 +}
18996 +#endif
18997 +
18998 +/*
18999 + * PaX: decide what to do with offenders (regs->ip = fault address)
19000 + *
19001 + * returns 1 when task should be killed
19002 + * 2 when gcc trampoline was detected
19003 + */
19004 +static int pax_handle_fetch_fault(struct pt_regs *regs)
19005 +{
19006 + if (v8086_mode(regs))
19007 + return 1;
19008 +
19009 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19010 + return 1;
19011 +
19012 +#ifdef CONFIG_X86_32
19013 + return pax_handle_fetch_fault_32(regs);
19014 +#else
19015 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19016 + return pax_handle_fetch_fault_32(regs);
19017 + else
19018 + return pax_handle_fetch_fault_64(regs);
19019 +#endif
19020 +}
19021 +#endif
19022 +
19023 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19024 +void pax_report_insns(void *pc, void *sp)
19025 +{
19026 + long i;
19027 +
19028 + printk(KERN_ERR "PAX: bytes at PC: ");
19029 + for (i = 0; i < 20; i++) {
19030 + unsigned char c;
19031 + if (get_user(c, (__force unsigned char __user *)pc+i))
19032 + printk(KERN_CONT "?? ");
19033 + else
19034 + printk(KERN_CONT "%02x ", c);
19035 + }
19036 + printk("\n");
19037 +
19038 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19039 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
19040 + unsigned long c;
19041 + if (get_user(c, (__force unsigned long __user *)sp+i))
19042 +#ifdef CONFIG_X86_32
19043 + printk(KERN_CONT "???????? ");
19044 +#else
19045 + printk(KERN_CONT "???????????????? ");
19046 +#endif
19047 + else
19048 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19049 + }
19050 + printk("\n");
19051 +}
19052 +#endif
19053 +
19054 +/**
19055 + * probe_kernel_write(): safely attempt to write to a location
19056 + * @dst: address to write to
19057 + * @src: pointer to the data that shall be written
19058 + * @size: size of the data chunk
19059 + *
19060 + * Safely write to address @dst from the buffer at @src. If a kernel fault
19061 + * happens, handle that and return -EFAULT.
19062 + */
19063 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19064 +{
19065 + long ret;
19066 + mm_segment_t old_fs = get_fs();
19067 +
19068 + set_fs(KERNEL_DS);
19069 + pagefault_disable();
19070 + pax_open_kernel();
19071 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19072 + pax_close_kernel();
19073 + pagefault_enable();
19074 + set_fs(old_fs);
19075 +
19076 + return ret ? -EFAULT : 0;
19077 +}
19078 diff -urNp linux-3.0.4/arch/x86/mm/gup.c linux-3.0.4/arch/x86/mm/gup.c
19079 --- linux-3.0.4/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19080 +++ linux-3.0.4/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19081 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19082 addr = start;
19083 len = (unsigned long) nr_pages << PAGE_SHIFT;
19084 end = start + len;
19085 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19086 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19087 (void __user *)start, len)))
19088 return 0;
19089
19090 diff -urNp linux-3.0.4/arch/x86/mm/highmem_32.c linux-3.0.4/arch/x86/mm/highmem_32.c
19091 --- linux-3.0.4/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19092 +++ linux-3.0.4/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19093 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19094 idx = type + KM_TYPE_NR*smp_processor_id();
19095 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19096 BUG_ON(!pte_none(*(kmap_pte-idx)));
19097 +
19098 + pax_open_kernel();
19099 set_pte(kmap_pte-idx, mk_pte(page, prot));
19100 + pax_close_kernel();
19101
19102 return (void *)vaddr;
19103 }
19104 diff -urNp linux-3.0.4/arch/x86/mm/hugetlbpage.c linux-3.0.4/arch/x86/mm/hugetlbpage.c
19105 --- linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19106 +++ linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19107 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19108 struct hstate *h = hstate_file(file);
19109 struct mm_struct *mm = current->mm;
19110 struct vm_area_struct *vma;
19111 - unsigned long start_addr;
19112 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19113 +
19114 +#ifdef CONFIG_PAX_SEGMEXEC
19115 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19116 + pax_task_size = SEGMEXEC_TASK_SIZE;
19117 +#endif
19118 +
19119 + pax_task_size -= PAGE_SIZE;
19120
19121 if (len > mm->cached_hole_size) {
19122 - start_addr = mm->free_area_cache;
19123 + start_addr = mm->free_area_cache;
19124 } else {
19125 - start_addr = TASK_UNMAPPED_BASE;
19126 - mm->cached_hole_size = 0;
19127 + start_addr = mm->mmap_base;
19128 + mm->cached_hole_size = 0;
19129 }
19130
19131 full_search:
19132 @@ -280,26 +287,27 @@ full_search:
19133
19134 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19135 /* At this point: (!vma || addr < vma->vm_end). */
19136 - if (TASK_SIZE - len < addr) {
19137 + if (pax_task_size - len < addr) {
19138 /*
19139 * Start a new search - just in case we missed
19140 * some holes.
19141 */
19142 - if (start_addr != TASK_UNMAPPED_BASE) {
19143 - start_addr = TASK_UNMAPPED_BASE;
19144 + if (start_addr != mm->mmap_base) {
19145 + start_addr = mm->mmap_base;
19146 mm->cached_hole_size = 0;
19147 goto full_search;
19148 }
19149 return -ENOMEM;
19150 }
19151 - if (!vma || addr + len <= vma->vm_start) {
19152 - mm->free_area_cache = addr + len;
19153 - return addr;
19154 - }
19155 + if (check_heap_stack_gap(vma, addr, len))
19156 + break;
19157 if (addr + mm->cached_hole_size < vma->vm_start)
19158 mm->cached_hole_size = vma->vm_start - addr;
19159 addr = ALIGN(vma->vm_end, huge_page_size(h));
19160 }
19161 +
19162 + mm->free_area_cache = addr + len;
19163 + return addr;
19164 }
19165
19166 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19167 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19168 {
19169 struct hstate *h = hstate_file(file);
19170 struct mm_struct *mm = current->mm;
19171 - struct vm_area_struct *vma, *prev_vma;
19172 - unsigned long base = mm->mmap_base, addr = addr0;
19173 + struct vm_area_struct *vma;
19174 + unsigned long base = mm->mmap_base, addr;
19175 unsigned long largest_hole = mm->cached_hole_size;
19176 - int first_time = 1;
19177
19178 /* don't allow allocations above current base */
19179 if (mm->free_area_cache > base)
19180 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19181 largest_hole = 0;
19182 mm->free_area_cache = base;
19183 }
19184 -try_again:
19185 +
19186 /* make sure it can fit in the remaining address space */
19187 if (mm->free_area_cache < len)
19188 goto fail;
19189
19190 /* either no address requested or can't fit in requested address hole */
19191 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
19192 + addr = (mm->free_area_cache - len);
19193 do {
19194 + addr &= huge_page_mask(h);
19195 + vma = find_vma(mm, addr);
19196 /*
19197 * Lookup failure means no vma is above this address,
19198 * i.e. return with success:
19199 - */
19200 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19201 - return addr;
19202 -
19203 - /*
19204 * new region fits between prev_vma->vm_end and
19205 * vma->vm_start, use it:
19206 */
19207 - if (addr + len <= vma->vm_start &&
19208 - (!prev_vma || (addr >= prev_vma->vm_end))) {
19209 + if (check_heap_stack_gap(vma, addr, len)) {
19210 /* remember the address as a hint for next time */
19211 - mm->cached_hole_size = largest_hole;
19212 - return (mm->free_area_cache = addr);
19213 - } else {
19214 - /* pull free_area_cache down to the first hole */
19215 - if (mm->free_area_cache == vma->vm_end) {
19216 - mm->free_area_cache = vma->vm_start;
19217 - mm->cached_hole_size = largest_hole;
19218 - }
19219 + mm->cached_hole_size = largest_hole;
19220 + return (mm->free_area_cache = addr);
19221 + }
19222 + /* pull free_area_cache down to the first hole */
19223 + if (mm->free_area_cache == vma->vm_end) {
19224 + mm->free_area_cache = vma->vm_start;
19225 + mm->cached_hole_size = largest_hole;
19226 }
19227
19228 /* remember the largest hole we saw so far */
19229 if (addr + largest_hole < vma->vm_start)
19230 - largest_hole = vma->vm_start - addr;
19231 + largest_hole = vma->vm_start - addr;
19232
19233 /* try just below the current vma->vm_start */
19234 - addr = (vma->vm_start - len) & huge_page_mask(h);
19235 - } while (len <= vma->vm_start);
19236 + addr = skip_heap_stack_gap(vma, len);
19237 + } while (!IS_ERR_VALUE(addr));
19238
19239 fail:
19240 /*
19241 - * if hint left us with no space for the requested
19242 - * mapping then try again:
19243 - */
19244 - if (first_time) {
19245 - mm->free_area_cache = base;
19246 - largest_hole = 0;
19247 - first_time = 0;
19248 - goto try_again;
19249 - }
19250 - /*
19251 * A failed mmap() very likely causes application failure,
19252 * so fall back to the bottom-up function here. This scenario
19253 * can happen with large stack limits and large mmap()
19254 * allocations.
19255 */
19256 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19257 +
19258 +#ifdef CONFIG_PAX_SEGMEXEC
19259 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19260 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19261 + else
19262 +#endif
19263 +
19264 + mm->mmap_base = TASK_UNMAPPED_BASE;
19265 +
19266 +#ifdef CONFIG_PAX_RANDMMAP
19267 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19268 + mm->mmap_base += mm->delta_mmap;
19269 +#endif
19270 +
19271 + mm->free_area_cache = mm->mmap_base;
19272 mm->cached_hole_size = ~0UL;
19273 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19274 len, pgoff, flags);
19275 @@ -386,6 +392,7 @@ fail:
19276 /*
19277 * Restore the topdown base:
19278 */
19279 + mm->mmap_base = base;
19280 mm->free_area_cache = base;
19281 mm->cached_hole_size = ~0UL;
19282
19283 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19284 struct hstate *h = hstate_file(file);
19285 struct mm_struct *mm = current->mm;
19286 struct vm_area_struct *vma;
19287 + unsigned long pax_task_size = TASK_SIZE;
19288
19289 if (len & ~huge_page_mask(h))
19290 return -EINVAL;
19291 - if (len > TASK_SIZE)
19292 +
19293 +#ifdef CONFIG_PAX_SEGMEXEC
19294 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19295 + pax_task_size = SEGMEXEC_TASK_SIZE;
19296 +#endif
19297 +
19298 + pax_task_size -= PAGE_SIZE;
19299 +
19300 + if (len > pax_task_size)
19301 return -ENOMEM;
19302
19303 if (flags & MAP_FIXED) {
19304 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19305 if (addr) {
19306 addr = ALIGN(addr, huge_page_size(h));
19307 vma = find_vma(mm, addr);
19308 - if (TASK_SIZE - len >= addr &&
19309 - (!vma || addr + len <= vma->vm_start))
19310 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19311 return addr;
19312 }
19313 if (mm->get_unmapped_area == arch_get_unmapped_area)
19314 diff -urNp linux-3.0.4/arch/x86/mm/init_32.c linux-3.0.4/arch/x86/mm/init_32.c
19315 --- linux-3.0.4/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
19316 +++ linux-3.0.4/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
19317 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19318 }
19319
19320 /*
19321 - * Creates a middle page table and puts a pointer to it in the
19322 - * given global directory entry. This only returns the gd entry
19323 - * in non-PAE compilation mode, since the middle layer is folded.
19324 - */
19325 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
19326 -{
19327 - pud_t *pud;
19328 - pmd_t *pmd_table;
19329 -
19330 -#ifdef CONFIG_X86_PAE
19331 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19332 - if (after_bootmem)
19333 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19334 - else
19335 - pmd_table = (pmd_t *)alloc_low_page();
19336 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19337 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19338 - pud = pud_offset(pgd, 0);
19339 - BUG_ON(pmd_table != pmd_offset(pud, 0));
19340 -
19341 - return pmd_table;
19342 - }
19343 -#endif
19344 - pud = pud_offset(pgd, 0);
19345 - pmd_table = pmd_offset(pud, 0);
19346 -
19347 - return pmd_table;
19348 -}
19349 -
19350 -/*
19351 * Create a page table and place a pointer to it in a middle page
19352 * directory entry:
19353 */
19354 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19355 page_table = (pte_t *)alloc_low_page();
19356
19357 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19358 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19359 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19360 +#else
19361 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19362 +#endif
19363 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19364 }
19365
19366 return pte_offset_kernel(pmd, 0);
19367 }
19368
19369 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
19370 +{
19371 + pud_t *pud;
19372 + pmd_t *pmd_table;
19373 +
19374 + pud = pud_offset(pgd, 0);
19375 + pmd_table = pmd_offset(pud, 0);
19376 +
19377 + return pmd_table;
19378 +}
19379 +
19380 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19381 {
19382 int pgd_idx = pgd_index(vaddr);
19383 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19384 int pgd_idx, pmd_idx;
19385 unsigned long vaddr;
19386 pgd_t *pgd;
19387 + pud_t *pud;
19388 pmd_t *pmd;
19389 pte_t *pte = NULL;
19390
19391 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19392 pgd = pgd_base + pgd_idx;
19393
19394 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19395 - pmd = one_md_table_init(pgd);
19396 - pmd = pmd + pmd_index(vaddr);
19397 + pud = pud_offset(pgd, vaddr);
19398 + pmd = pmd_offset(pud, vaddr);
19399 +
19400 +#ifdef CONFIG_X86_PAE
19401 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19402 +#endif
19403 +
19404 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19405 pmd++, pmd_idx++) {
19406 pte = page_table_kmap_check(one_page_table_init(pmd),
19407 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19408 }
19409 }
19410
19411 -static inline int is_kernel_text(unsigned long addr)
19412 +static inline int is_kernel_text(unsigned long start, unsigned long end)
19413 {
19414 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19415 - return 1;
19416 - return 0;
19417 + if ((start > ktla_ktva((unsigned long)_etext) ||
19418 + end <= ktla_ktva((unsigned long)_stext)) &&
19419 + (start > ktla_ktva((unsigned long)_einittext) ||
19420 + end <= ktla_ktva((unsigned long)_sinittext)) &&
19421 +
19422 +#ifdef CONFIG_ACPI_SLEEP
19423 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19424 +#endif
19425 +
19426 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19427 + return 0;
19428 + return 1;
19429 }
19430
19431 /*
19432 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19433 unsigned long last_map_addr = end;
19434 unsigned long start_pfn, end_pfn;
19435 pgd_t *pgd_base = swapper_pg_dir;
19436 - int pgd_idx, pmd_idx, pte_ofs;
19437 + unsigned int pgd_idx, pmd_idx, pte_ofs;
19438 unsigned long pfn;
19439 pgd_t *pgd;
19440 + pud_t *pud;
19441 pmd_t *pmd;
19442 pte_t *pte;
19443 unsigned pages_2m, pages_4k;
19444 @@ -281,8 +282,13 @@ repeat:
19445 pfn = start_pfn;
19446 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19447 pgd = pgd_base + pgd_idx;
19448 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19449 - pmd = one_md_table_init(pgd);
19450 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19451 + pud = pud_offset(pgd, 0);
19452 + pmd = pmd_offset(pud, 0);
19453 +
19454 +#ifdef CONFIG_X86_PAE
19455 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19456 +#endif
19457
19458 if (pfn >= end_pfn)
19459 continue;
19460 @@ -294,14 +300,13 @@ repeat:
19461 #endif
19462 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19463 pmd++, pmd_idx++) {
19464 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19465 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19466
19467 /*
19468 * Map with big pages if possible, otherwise
19469 * create normal page tables:
19470 */
19471 if (use_pse) {
19472 - unsigned int addr2;
19473 pgprot_t prot = PAGE_KERNEL_LARGE;
19474 /*
19475 * first pass will use the same initial
19476 @@ -311,11 +316,7 @@ repeat:
19477 __pgprot(PTE_IDENT_ATTR |
19478 _PAGE_PSE);
19479
19480 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19481 - PAGE_OFFSET + PAGE_SIZE-1;
19482 -
19483 - if (is_kernel_text(addr) ||
19484 - is_kernel_text(addr2))
19485 + if (is_kernel_text(address, address + PMD_SIZE))
19486 prot = PAGE_KERNEL_LARGE_EXEC;
19487
19488 pages_2m++;
19489 @@ -332,7 +333,7 @@ repeat:
19490 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19491 pte += pte_ofs;
19492 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19493 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19494 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19495 pgprot_t prot = PAGE_KERNEL;
19496 /*
19497 * first pass will use the same initial
19498 @@ -340,7 +341,7 @@ repeat:
19499 */
19500 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19501
19502 - if (is_kernel_text(addr))
19503 + if (is_kernel_text(address, address + PAGE_SIZE))
19504 prot = PAGE_KERNEL_EXEC;
19505
19506 pages_4k++;
19507 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19508
19509 pud = pud_offset(pgd, va);
19510 pmd = pmd_offset(pud, va);
19511 - if (!pmd_present(*pmd))
19512 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
19513 break;
19514
19515 pte = pte_offset_kernel(pmd, va);
19516 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19517
19518 static void __init pagetable_init(void)
19519 {
19520 - pgd_t *pgd_base = swapper_pg_dir;
19521 -
19522 - permanent_kmaps_init(pgd_base);
19523 + permanent_kmaps_init(swapper_pg_dir);
19524 }
19525
19526 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19527 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19528 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19529
19530 /* user-defined highmem size */
19531 @@ -757,6 +756,12 @@ void __init mem_init(void)
19532
19533 pci_iommu_alloc();
19534
19535 +#ifdef CONFIG_PAX_PER_CPU_PGD
19536 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19537 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19538 + KERNEL_PGD_PTRS);
19539 +#endif
19540 +
19541 #ifdef CONFIG_FLATMEM
19542 BUG_ON(!mem_map);
19543 #endif
19544 @@ -774,7 +779,7 @@ void __init mem_init(void)
19545 set_highmem_pages_init();
19546
19547 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19548 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19549 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19550 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19551
19552 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19553 @@ -815,10 +820,10 @@ void __init mem_init(void)
19554 ((unsigned long)&__init_end -
19555 (unsigned long)&__init_begin) >> 10,
19556
19557 - (unsigned long)&_etext, (unsigned long)&_edata,
19558 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19559 + (unsigned long)&_sdata, (unsigned long)&_edata,
19560 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19561
19562 - (unsigned long)&_text, (unsigned long)&_etext,
19563 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19564 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19565
19566 /*
19567 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
19568 if (!kernel_set_to_readonly)
19569 return;
19570
19571 + start = ktla_ktva(start);
19572 pr_debug("Set kernel text: %lx - %lx for read write\n",
19573 start, start+size);
19574
19575 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
19576 if (!kernel_set_to_readonly)
19577 return;
19578
19579 + start = ktla_ktva(start);
19580 pr_debug("Set kernel text: %lx - %lx for read only\n",
19581 start, start+size);
19582
19583 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
19584 unsigned long start = PFN_ALIGN(_text);
19585 unsigned long size = PFN_ALIGN(_etext) - start;
19586
19587 + start = ktla_ktva(start);
19588 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19589 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19590 size >> 10);
19591 diff -urNp linux-3.0.4/arch/x86/mm/init_64.c linux-3.0.4/arch/x86/mm/init_64.c
19592 --- linux-3.0.4/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
19593 +++ linux-3.0.4/arch/x86/mm/init_64.c 2011-08-23 21:47:55.000000000 -0400
19594 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
19595 * around without checking the pgd every time.
19596 */
19597
19598 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19599 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19600 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19601
19602 int force_personality32;
19603 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
19604
19605 for (address = start; address <= end; address += PGDIR_SIZE) {
19606 const pgd_t *pgd_ref = pgd_offset_k(address);
19607 +
19608 +#ifdef CONFIG_PAX_PER_CPU_PGD
19609 + unsigned long cpu;
19610 +#else
19611 struct page *page;
19612 +#endif
19613
19614 if (pgd_none(*pgd_ref))
19615 continue;
19616
19617 spin_lock(&pgd_lock);
19618 +
19619 +#ifdef CONFIG_PAX_PER_CPU_PGD
19620 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19621 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19622 +#else
19623 list_for_each_entry(page, &pgd_list, lru) {
19624 pgd_t *pgd;
19625 spinlock_t *pgt_lock;
19626 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
19627 /* the pgt_lock only for Xen */
19628 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19629 spin_lock(pgt_lock);
19630 +#endif
19631
19632 if (pgd_none(*pgd))
19633 set_pgd(pgd, *pgd_ref);
19634 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
19635 BUG_ON(pgd_page_vaddr(*pgd)
19636 != pgd_page_vaddr(*pgd_ref));
19637
19638 +#ifndef CONFIG_PAX_PER_CPU_PGD
19639 spin_unlock(pgt_lock);
19640 +#endif
19641 +
19642 }
19643 spin_unlock(&pgd_lock);
19644 }
19645 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19646 pmd = fill_pmd(pud, vaddr);
19647 pte = fill_pte(pmd, vaddr);
19648
19649 + pax_open_kernel();
19650 set_pte(pte, new_pte);
19651 + pax_close_kernel();
19652
19653 /*
19654 * It's enough to flush this one mapping.
19655 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
19656 pgd = pgd_offset_k((unsigned long)__va(phys));
19657 if (pgd_none(*pgd)) {
19658 pud = (pud_t *) spp_getpage();
19659 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19660 - _PAGE_USER));
19661 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19662 }
19663 pud = pud_offset(pgd, (unsigned long)__va(phys));
19664 if (pud_none(*pud)) {
19665 pmd = (pmd_t *) spp_getpage();
19666 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19667 - _PAGE_USER));
19668 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19669 }
19670 pmd = pmd_offset(pud, phys);
19671 BUG_ON(!pmd_none(*pmd));
19672 @@ -693,6 +707,12 @@ void __init mem_init(void)
19673
19674 pci_iommu_alloc();
19675
19676 +#ifdef CONFIG_PAX_PER_CPU_PGD
19677 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19678 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19679 + KERNEL_PGD_PTRS);
19680 +#endif
19681 +
19682 /* clear_bss() already clear the empty_zero_page */
19683
19684 reservedpages = 0;
19685 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
19686 static struct vm_area_struct gate_vma = {
19687 .vm_start = VSYSCALL_START,
19688 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19689 - .vm_page_prot = PAGE_READONLY_EXEC,
19690 - .vm_flags = VM_READ | VM_EXEC
19691 + .vm_page_prot = PAGE_READONLY,
19692 + .vm_flags = VM_READ
19693 };
19694
19695 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19696 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
19697
19698 const char *arch_vma_name(struct vm_area_struct *vma)
19699 {
19700 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19701 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19702 return "[vdso]";
19703 if (vma == &gate_vma)
19704 return "[vsyscall]";
19705 diff -urNp linux-3.0.4/arch/x86/mm/init.c linux-3.0.4/arch/x86/mm/init.c
19706 --- linux-3.0.4/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
19707 +++ linux-3.0.4/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
19708 @@ -31,7 +31,7 @@ int direct_gbpages
19709 static void __init find_early_table_space(unsigned long end, int use_pse,
19710 int use_gbpages)
19711 {
19712 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19713 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19714 phys_addr_t base;
19715
19716 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19717 @@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
19718 */
19719 int devmem_is_allowed(unsigned long pagenr)
19720 {
19721 - if (pagenr <= 256)
19722 +#ifdef CONFIG_GRKERNSEC_KMEM
19723 + /* allow BDA */
19724 + if (!pagenr)
19725 + return 1;
19726 + /* allow EBDA */
19727 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19728 + return 1;
19729 +#else
19730 + if (!pagenr)
19731 + return 1;
19732 +#ifdef CONFIG_VM86
19733 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19734 + return 1;
19735 +#endif
19736 +#endif
19737 +
19738 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19739 return 1;
19740 +#ifdef CONFIG_GRKERNSEC_KMEM
19741 + /* throw out everything else below 1MB */
19742 + if (pagenr <= 256)
19743 + return 0;
19744 +#endif
19745 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19746 return 0;
19747 if (!page_is_ram(pagenr))
19748 return 1;
19749 +
19750 return 0;
19751 }
19752
19753 @@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
19754
19755 void free_initmem(void)
19756 {
19757 +
19758 +#ifdef CONFIG_PAX_KERNEXEC
19759 +#ifdef CONFIG_X86_32
19760 + /* PaX: limit KERNEL_CS to actual size */
19761 + unsigned long addr, limit;
19762 + struct desc_struct d;
19763 + int cpu;
19764 +
19765 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19766 + limit = (limit - 1UL) >> PAGE_SHIFT;
19767 +
19768 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19769 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
19770 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19771 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19772 + }
19773 +
19774 + /* PaX: make KERNEL_CS read-only */
19775 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19776 + if (!paravirt_enabled())
19777 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19778 +/*
19779 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19780 + pgd = pgd_offset_k(addr);
19781 + pud = pud_offset(pgd, addr);
19782 + pmd = pmd_offset(pud, addr);
19783 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19784 + }
19785 +*/
19786 +#ifdef CONFIG_X86_PAE
19787 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19788 +/*
19789 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19790 + pgd = pgd_offset_k(addr);
19791 + pud = pud_offset(pgd, addr);
19792 + pmd = pmd_offset(pud, addr);
19793 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19794 + }
19795 +*/
19796 +#endif
19797 +
19798 +#ifdef CONFIG_MODULES
19799 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19800 +#endif
19801 +
19802 +#else
19803 + pgd_t *pgd;
19804 + pud_t *pud;
19805 + pmd_t *pmd;
19806 + unsigned long addr, end;
19807 +
19808 + /* PaX: make kernel code/rodata read-only, rest non-executable */
19809 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19810 + pgd = pgd_offset_k(addr);
19811 + pud = pud_offset(pgd, addr);
19812 + pmd = pmd_offset(pud, addr);
19813 + if (!pmd_present(*pmd))
19814 + continue;
19815 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19816 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19817 + else
19818 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19819 + }
19820 +
19821 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19822 + end = addr + KERNEL_IMAGE_SIZE;
19823 + for (; addr < end; addr += PMD_SIZE) {
19824 + pgd = pgd_offset_k(addr);
19825 + pud = pud_offset(pgd, addr);
19826 + pmd = pmd_offset(pud, addr);
19827 + if (!pmd_present(*pmd))
19828 + continue;
19829 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19830 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19831 + }
19832 +#endif
19833 +
19834 + flush_tlb_all();
19835 +#endif
19836 +
19837 free_init_pages("unused kernel memory",
19838 (unsigned long)(&__init_begin),
19839 (unsigned long)(&__init_end));
19840 diff -urNp linux-3.0.4/arch/x86/mm/iomap_32.c linux-3.0.4/arch/x86/mm/iomap_32.c
19841 --- linux-3.0.4/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
19842 +++ linux-3.0.4/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
19843 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19844 type = kmap_atomic_idx_push();
19845 idx = type + KM_TYPE_NR * smp_processor_id();
19846 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19847 +
19848 + pax_open_kernel();
19849 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19850 + pax_close_kernel();
19851 +
19852 arch_flush_lazy_mmu_mode();
19853
19854 return (void *)vaddr;
19855 diff -urNp linux-3.0.4/arch/x86/mm/ioremap.c linux-3.0.4/arch/x86/mm/ioremap.c
19856 --- linux-3.0.4/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
19857 +++ linux-3.0.4/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
19858 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
19859 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19860 int is_ram = page_is_ram(pfn);
19861
19862 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19863 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19864 return NULL;
19865 WARN_ON_ONCE(is_ram);
19866 }
19867 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19868 early_param("early_ioremap_debug", early_ioremap_debug_setup);
19869
19870 static __initdata int after_paging_init;
19871 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19872 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19873
19874 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
19875 {
19876 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
19877 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
19878
19879 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
19880 - memset(bm_pte, 0, sizeof(bm_pte));
19881 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
19882 + pmd_populate_user(&init_mm, pmd, bm_pte);
19883
19884 /*
19885 * The boot-ioremap range spans multiple pmds, for which
19886 diff -urNp linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c
19887 --- linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
19888 +++ linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
19889 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
19890 * memory (e.g. tracked pages)? For now, we need this to avoid
19891 * invoking kmemcheck for PnP BIOS calls.
19892 */
19893 - if (regs->flags & X86_VM_MASK)
19894 + if (v8086_mode(regs))
19895 return false;
19896 - if (regs->cs != __KERNEL_CS)
19897 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
19898 return false;
19899
19900 pte = kmemcheck_pte_lookup(address);
19901 diff -urNp linux-3.0.4/arch/x86/mm/mmap.c linux-3.0.4/arch/x86/mm/mmap.c
19902 --- linux-3.0.4/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
19903 +++ linux-3.0.4/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
19904 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
19905 * Leave an at least ~128 MB hole with possible stack randomization.
19906 */
19907 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
19908 -#define MAX_GAP (TASK_SIZE/6*5)
19909 +#define MAX_GAP (pax_task_size/6*5)
19910
19911 /*
19912 * True on X86_32 or when emulating IA32 on X86_64
19913 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
19914 return rnd << PAGE_SHIFT;
19915 }
19916
19917 -static unsigned long mmap_base(void)
19918 +static unsigned long mmap_base(struct mm_struct *mm)
19919 {
19920 unsigned long gap = rlimit(RLIMIT_STACK);
19921 + unsigned long pax_task_size = TASK_SIZE;
19922 +
19923 +#ifdef CONFIG_PAX_SEGMEXEC
19924 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19925 + pax_task_size = SEGMEXEC_TASK_SIZE;
19926 +#endif
19927
19928 if (gap < MIN_GAP)
19929 gap = MIN_GAP;
19930 else if (gap > MAX_GAP)
19931 gap = MAX_GAP;
19932
19933 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
19934 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
19935 }
19936
19937 /*
19938 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
19939 * does, but not when emulating X86_32
19940 */
19941 -static unsigned long mmap_legacy_base(void)
19942 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
19943 {
19944 - if (mmap_is_ia32())
19945 + if (mmap_is_ia32()) {
19946 +
19947 +#ifdef CONFIG_PAX_SEGMEXEC
19948 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19949 + return SEGMEXEC_TASK_UNMAPPED_BASE;
19950 + else
19951 +#endif
19952 +
19953 return TASK_UNMAPPED_BASE;
19954 - else
19955 + } else
19956 return TASK_UNMAPPED_BASE + mmap_rnd();
19957 }
19958
19959 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
19960 void arch_pick_mmap_layout(struct mm_struct *mm)
19961 {
19962 if (mmap_is_legacy()) {
19963 - mm->mmap_base = mmap_legacy_base();
19964 + mm->mmap_base = mmap_legacy_base(mm);
19965 +
19966 +#ifdef CONFIG_PAX_RANDMMAP
19967 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19968 + mm->mmap_base += mm->delta_mmap;
19969 +#endif
19970 +
19971 mm->get_unmapped_area = arch_get_unmapped_area;
19972 mm->unmap_area = arch_unmap_area;
19973 } else {
19974 - mm->mmap_base = mmap_base();
19975 + mm->mmap_base = mmap_base(mm);
19976 +
19977 +#ifdef CONFIG_PAX_RANDMMAP
19978 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19979 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
19980 +#endif
19981 +
19982 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
19983 mm->unmap_area = arch_unmap_area_topdown;
19984 }
19985 diff -urNp linux-3.0.4/arch/x86/mm/mmio-mod.c linux-3.0.4/arch/x86/mm/mmio-mod.c
19986 --- linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
19987 +++ linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
19988 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
19989 break;
19990 default:
19991 {
19992 - unsigned char *ip = (unsigned char *)instptr;
19993 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
19994 my_trace->opcode = MMIO_UNKNOWN_OP;
19995 my_trace->width = 0;
19996 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
19997 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
19998 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
19999 void __iomem *addr)
20000 {
20001 - static atomic_t next_id;
20002 + static atomic_unchecked_t next_id;
20003 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20004 /* These are page-unaligned. */
20005 struct mmiotrace_map map = {
20006 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20007 .private = trace
20008 },
20009 .phys = offset,
20010 - .id = atomic_inc_return(&next_id)
20011 + .id = atomic_inc_return_unchecked(&next_id)
20012 };
20013 map.map_id = trace->id;
20014
20015 diff -urNp linux-3.0.4/arch/x86/mm/pageattr.c linux-3.0.4/arch/x86/mm/pageattr.c
20016 --- linux-3.0.4/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20017 +++ linux-3.0.4/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20018 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20019 */
20020 #ifdef CONFIG_PCI_BIOS
20021 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20022 - pgprot_val(forbidden) |= _PAGE_NX;
20023 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20024 #endif
20025
20026 /*
20027 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20028 * Does not cover __inittext since that is gone later on. On
20029 * 64bit we do not enforce !NX on the low mapping
20030 */
20031 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
20032 - pgprot_val(forbidden) |= _PAGE_NX;
20033 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20034 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20035
20036 +#ifdef CONFIG_DEBUG_RODATA
20037 /*
20038 * The .rodata section needs to be read-only. Using the pfn
20039 * catches all aliases.
20040 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20041 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20042 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20043 pgprot_val(forbidden) |= _PAGE_RW;
20044 +#endif
20045
20046 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20047 /*
20048 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20049 }
20050 #endif
20051
20052 +#ifdef CONFIG_PAX_KERNEXEC
20053 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20054 + pgprot_val(forbidden) |= _PAGE_RW;
20055 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20056 + }
20057 +#endif
20058 +
20059 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20060
20061 return prot;
20062 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20063 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20064 {
20065 /* change init_mm */
20066 + pax_open_kernel();
20067 set_pte_atomic(kpte, pte);
20068 +
20069 #ifdef CONFIG_X86_32
20070 if (!SHARED_KERNEL_PMD) {
20071 +
20072 +#ifdef CONFIG_PAX_PER_CPU_PGD
20073 + unsigned long cpu;
20074 +#else
20075 struct page *page;
20076 +#endif
20077
20078 +#ifdef CONFIG_PAX_PER_CPU_PGD
20079 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20080 + pgd_t *pgd = get_cpu_pgd(cpu);
20081 +#else
20082 list_for_each_entry(page, &pgd_list, lru) {
20083 - pgd_t *pgd;
20084 + pgd_t *pgd = (pgd_t *)page_address(page);
20085 +#endif
20086 +
20087 pud_t *pud;
20088 pmd_t *pmd;
20089
20090 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
20091 + pgd += pgd_index(address);
20092 pud = pud_offset(pgd, address);
20093 pmd = pmd_offset(pud, address);
20094 set_pte_atomic((pte_t *)pmd, pte);
20095 }
20096 }
20097 #endif
20098 + pax_close_kernel();
20099 }
20100
20101 static int
20102 diff -urNp linux-3.0.4/arch/x86/mm/pageattr-test.c linux-3.0.4/arch/x86/mm/pageattr-test.c
20103 --- linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20104 +++ linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20105 @@ -36,7 +36,7 @@ enum {
20106
20107 static int pte_testbit(pte_t pte)
20108 {
20109 - return pte_flags(pte) & _PAGE_UNUSED1;
20110 + return pte_flags(pte) & _PAGE_CPA_TEST;
20111 }
20112
20113 struct split_state {
20114 diff -urNp linux-3.0.4/arch/x86/mm/pat.c linux-3.0.4/arch/x86/mm/pat.c
20115 --- linux-3.0.4/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20116 +++ linux-3.0.4/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20117 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20118
20119 if (!entry) {
20120 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20121 - current->comm, current->pid, start, end);
20122 + current->comm, task_pid_nr(current), start, end);
20123 return -EINVAL;
20124 }
20125
20126 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20127 while (cursor < to) {
20128 if (!devmem_is_allowed(pfn)) {
20129 printk(KERN_INFO
20130 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20131 - current->comm, from, to);
20132 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20133 + current->comm, from, to, cursor);
20134 return 0;
20135 }
20136 cursor += PAGE_SIZE;
20137 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20138 printk(KERN_INFO
20139 "%s:%d ioremap_change_attr failed %s "
20140 "for %Lx-%Lx\n",
20141 - current->comm, current->pid,
20142 + current->comm, task_pid_nr(current),
20143 cattr_name(flags),
20144 base, (unsigned long long)(base + size));
20145 return -EINVAL;
20146 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20147 if (want_flags != flags) {
20148 printk(KERN_WARNING
20149 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20150 - current->comm, current->pid,
20151 + current->comm, task_pid_nr(current),
20152 cattr_name(want_flags),
20153 (unsigned long long)paddr,
20154 (unsigned long long)(paddr + size),
20155 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20156 free_memtype(paddr, paddr + size);
20157 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20158 " for %Lx-%Lx, got %s\n",
20159 - current->comm, current->pid,
20160 + current->comm, task_pid_nr(current),
20161 cattr_name(want_flags),
20162 (unsigned long long)paddr,
20163 (unsigned long long)(paddr + size),
20164 diff -urNp linux-3.0.4/arch/x86/mm/pf_in.c linux-3.0.4/arch/x86/mm/pf_in.c
20165 --- linux-3.0.4/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
20166 +++ linux-3.0.4/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
20167 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20168 int i;
20169 enum reason_type rv = OTHERS;
20170
20171 - p = (unsigned char *)ins_addr;
20172 + p = (unsigned char *)ktla_ktva(ins_addr);
20173 p += skip_prefix(p, &prf);
20174 p += get_opcode(p, &opcode);
20175
20176 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20177 struct prefix_bits prf;
20178 int i;
20179
20180 - p = (unsigned char *)ins_addr;
20181 + p = (unsigned char *)ktla_ktva(ins_addr);
20182 p += skip_prefix(p, &prf);
20183 p += get_opcode(p, &opcode);
20184
20185 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20186 struct prefix_bits prf;
20187 int i;
20188
20189 - p = (unsigned char *)ins_addr;
20190 + p = (unsigned char *)ktla_ktva(ins_addr);
20191 p += skip_prefix(p, &prf);
20192 p += get_opcode(p, &opcode);
20193
20194 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
20195 struct prefix_bits prf;
20196 int i;
20197
20198 - p = (unsigned char *)ins_addr;
20199 + p = (unsigned char *)ktla_ktva(ins_addr);
20200 p += skip_prefix(p, &prf);
20201 p += get_opcode(p, &opcode);
20202 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20203 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
20204 struct prefix_bits prf;
20205 int i;
20206
20207 - p = (unsigned char *)ins_addr;
20208 + p = (unsigned char *)ktla_ktva(ins_addr);
20209 p += skip_prefix(p, &prf);
20210 p += get_opcode(p, &opcode);
20211 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20212 diff -urNp linux-3.0.4/arch/x86/mm/pgtable_32.c linux-3.0.4/arch/x86/mm/pgtable_32.c
20213 --- linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
20214 +++ linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
20215 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20216 return;
20217 }
20218 pte = pte_offset_kernel(pmd, vaddr);
20219 +
20220 + pax_open_kernel();
20221 if (pte_val(pteval))
20222 set_pte_at(&init_mm, vaddr, pte, pteval);
20223 else
20224 pte_clear(&init_mm, vaddr, pte);
20225 + pax_close_kernel();
20226
20227 /*
20228 * It's enough to flush this one mapping.
20229 diff -urNp linux-3.0.4/arch/x86/mm/pgtable.c linux-3.0.4/arch/x86/mm/pgtable.c
20230 --- linux-3.0.4/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
20231 +++ linux-3.0.4/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
20232 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20233 list_del(&page->lru);
20234 }
20235
20236 -#define UNSHARED_PTRS_PER_PGD \
20237 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20238 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20239 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20240
20241 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20242 +{
20243 + while (count--)
20244 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20245 +}
20246 +#endif
20247 +
20248 +#ifdef CONFIG_PAX_PER_CPU_PGD
20249 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20250 +{
20251 + while (count--)
20252 +
20253 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20254 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20255 +#else
20256 + *dst++ = *src++;
20257 +#endif
20258
20259 +}
20260 +#endif
20261 +
20262 +#ifdef CONFIG_X86_64
20263 +#define pxd_t pud_t
20264 +#define pyd_t pgd_t
20265 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20266 +#define pxd_free(mm, pud) pud_free((mm), (pud))
20267 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20268 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20269 +#define PYD_SIZE PGDIR_SIZE
20270 +#else
20271 +#define pxd_t pmd_t
20272 +#define pyd_t pud_t
20273 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20274 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
20275 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20276 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
20277 +#define PYD_SIZE PUD_SIZE
20278 +#endif
20279 +
20280 +#ifdef CONFIG_PAX_PER_CPU_PGD
20281 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20282 +static inline void pgd_dtor(pgd_t *pgd) {}
20283 +#else
20284 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20285 {
20286 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20287 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20288 pgd_list_del(pgd);
20289 spin_unlock(&pgd_lock);
20290 }
20291 +#endif
20292
20293 /*
20294 * List of all pgd's needed for non-PAE so it can invalidate entries
20295 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20296 * -- wli
20297 */
20298
20299 -#ifdef CONFIG_X86_PAE
20300 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20301 /*
20302 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20303 * updating the top-level pagetable entries to guarantee the
20304 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20305 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20306 * and initialize the kernel pmds here.
20307 */
20308 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20309 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20310
20311 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20312 {
20313 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20314 */
20315 flush_tlb_mm(mm);
20316 }
20317 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20318 +#define PREALLOCATED_PXDS USER_PGD_PTRS
20319 #else /* !CONFIG_X86_PAE */
20320
20321 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20322 -#define PREALLOCATED_PMDS 0
20323 +#define PREALLOCATED_PXDS 0
20324
20325 #endif /* CONFIG_X86_PAE */
20326
20327 -static void free_pmds(pmd_t *pmds[])
20328 +static void free_pxds(pxd_t *pxds[])
20329 {
20330 int i;
20331
20332 - for(i = 0; i < PREALLOCATED_PMDS; i++)
20333 - if (pmds[i])
20334 - free_page((unsigned long)pmds[i]);
20335 + for(i = 0; i < PREALLOCATED_PXDS; i++)
20336 + if (pxds[i])
20337 + free_page((unsigned long)pxds[i]);
20338 }
20339
20340 -static int preallocate_pmds(pmd_t *pmds[])
20341 +static int preallocate_pxds(pxd_t *pxds[])
20342 {
20343 int i;
20344 bool failed = false;
20345
20346 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20347 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20348 - if (pmd == NULL)
20349 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20350 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20351 + if (pxd == NULL)
20352 failed = true;
20353 - pmds[i] = pmd;
20354 + pxds[i] = pxd;
20355 }
20356
20357 if (failed) {
20358 - free_pmds(pmds);
20359 + free_pxds(pxds);
20360 return -ENOMEM;
20361 }
20362
20363 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20364 * preallocate which never got a corresponding vma will need to be
20365 * freed manually.
20366 */
20367 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20368 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20369 {
20370 int i;
20371
20372 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20373 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20374 pgd_t pgd = pgdp[i];
20375
20376 if (pgd_val(pgd) != 0) {
20377 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20378 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20379
20380 - pgdp[i] = native_make_pgd(0);
20381 + set_pgd(pgdp + i, native_make_pgd(0));
20382
20383 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20384 - pmd_free(mm, pmd);
20385 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20386 + pxd_free(mm, pxd);
20387 }
20388 }
20389 }
20390
20391 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20392 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20393 {
20394 - pud_t *pud;
20395 + pyd_t *pyd;
20396 unsigned long addr;
20397 int i;
20398
20399 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20400 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20401 return;
20402
20403 - pud = pud_offset(pgd, 0);
20404 +#ifdef CONFIG_X86_64
20405 + pyd = pyd_offset(mm, 0L);
20406 +#else
20407 + pyd = pyd_offset(pgd, 0L);
20408 +#endif
20409
20410 - for (addr = i = 0; i < PREALLOCATED_PMDS;
20411 - i++, pud++, addr += PUD_SIZE) {
20412 - pmd_t *pmd = pmds[i];
20413 + for (addr = i = 0; i < PREALLOCATED_PXDS;
20414 + i++, pyd++, addr += PYD_SIZE) {
20415 + pxd_t *pxd = pxds[i];
20416
20417 if (i >= KERNEL_PGD_BOUNDARY)
20418 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20419 - sizeof(pmd_t) * PTRS_PER_PMD);
20420 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20421 + sizeof(pxd_t) * PTRS_PER_PMD);
20422
20423 - pud_populate(mm, pud, pmd);
20424 + pyd_populate(mm, pyd, pxd);
20425 }
20426 }
20427
20428 pgd_t *pgd_alloc(struct mm_struct *mm)
20429 {
20430 pgd_t *pgd;
20431 - pmd_t *pmds[PREALLOCATED_PMDS];
20432 + pxd_t *pxds[PREALLOCATED_PXDS];
20433
20434 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20435
20436 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20437
20438 mm->pgd = pgd;
20439
20440 - if (preallocate_pmds(pmds) != 0)
20441 + if (preallocate_pxds(pxds) != 0)
20442 goto out_free_pgd;
20443
20444 if (paravirt_pgd_alloc(mm) != 0)
20445 - goto out_free_pmds;
20446 + goto out_free_pxds;
20447
20448 /*
20449 * Make sure that pre-populating the pmds is atomic with
20450 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20451 spin_lock(&pgd_lock);
20452
20453 pgd_ctor(mm, pgd);
20454 - pgd_prepopulate_pmd(mm, pgd, pmds);
20455 + pgd_prepopulate_pxd(mm, pgd, pxds);
20456
20457 spin_unlock(&pgd_lock);
20458
20459 return pgd;
20460
20461 -out_free_pmds:
20462 - free_pmds(pmds);
20463 +out_free_pxds:
20464 + free_pxds(pxds);
20465 out_free_pgd:
20466 free_page((unsigned long)pgd);
20467 out:
20468 @@ -295,7 +344,7 @@ out:
20469
20470 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20471 {
20472 - pgd_mop_up_pmds(mm, pgd);
20473 + pgd_mop_up_pxds(mm, pgd);
20474 pgd_dtor(pgd);
20475 paravirt_pgd_free(mm, pgd);
20476 free_page((unsigned long)pgd);
20477 diff -urNp linux-3.0.4/arch/x86/mm/setup_nx.c linux-3.0.4/arch/x86/mm/setup_nx.c
20478 --- linux-3.0.4/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
20479 +++ linux-3.0.4/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
20480 @@ -5,8 +5,10 @@
20481 #include <asm/pgtable.h>
20482 #include <asm/proto.h>
20483
20484 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20485 static int disable_nx __cpuinitdata;
20486
20487 +#ifndef CONFIG_PAX_PAGEEXEC
20488 /*
20489 * noexec = on|off
20490 *
20491 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20492 return 0;
20493 }
20494 early_param("noexec", noexec_setup);
20495 +#endif
20496 +
20497 +#endif
20498
20499 void __cpuinit x86_configure_nx(void)
20500 {
20501 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20502 if (cpu_has_nx && !disable_nx)
20503 __supported_pte_mask |= _PAGE_NX;
20504 else
20505 +#endif
20506 __supported_pte_mask &= ~_PAGE_NX;
20507 }
20508
20509 diff -urNp linux-3.0.4/arch/x86/mm/tlb.c linux-3.0.4/arch/x86/mm/tlb.c
20510 --- linux-3.0.4/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
20511 +++ linux-3.0.4/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
20512 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
20513 BUG();
20514 cpumask_clear_cpu(cpu,
20515 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20516 +
20517 +#ifndef CONFIG_PAX_PER_CPU_PGD
20518 load_cr3(swapper_pg_dir);
20519 +#endif
20520 +
20521 }
20522 EXPORT_SYMBOL_GPL(leave_mm);
20523
20524 diff -urNp linux-3.0.4/arch/x86/net/bpf_jit_comp.c linux-3.0.4/arch/x86/net/bpf_jit_comp.c
20525 --- linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
20526 +++ linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
20527 @@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
20528 module_free(NULL, image);
20529 return;
20530 }
20531 + pax_open_kernel();
20532 memcpy(image + proglen, temp, ilen);
20533 + pax_close_kernel();
20534 }
20535 proglen += ilen;
20536 addrs[i] = proglen;
20537 @@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
20538 break;
20539 }
20540 if (proglen == oldproglen) {
20541 - image = module_alloc(max_t(unsigned int,
20542 + image = module_alloc_exec(max_t(unsigned int,
20543 proglen,
20544 sizeof(struct work_struct)));
20545 if (!image)
20546 diff -urNp linux-3.0.4/arch/x86/oprofile/backtrace.c linux-3.0.4/arch/x86/oprofile/backtrace.c
20547 --- linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-08-23 21:44:40.000000000 -0400
20548 +++ linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-08-23 21:47:55.000000000 -0400
20549 @@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
20550 {
20551 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20552
20553 - if (!user_mode_vm(regs)) {
20554 + if (!user_mode(regs)) {
20555 unsigned long stack = kernel_stack_pointer(regs);
20556 if (depth)
20557 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20558 diff -urNp linux-3.0.4/arch/x86/pci/mrst.c linux-3.0.4/arch/x86/pci/mrst.c
20559 --- linux-3.0.4/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
20560 +++ linux-3.0.4/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
20561 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20562 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20563 pci_mmcfg_late_init();
20564 pcibios_enable_irq = mrst_pci_irq_enable;
20565 - pci_root_ops = pci_mrst_ops;
20566 + pax_open_kernel();
20567 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20568 + pax_close_kernel();
20569 /* Continue with standard init */
20570 return 1;
20571 }
20572 diff -urNp linux-3.0.4/arch/x86/pci/pcbios.c linux-3.0.4/arch/x86/pci/pcbios.c
20573 --- linux-3.0.4/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
20574 +++ linux-3.0.4/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
20575 @@ -79,50 +79,93 @@ union bios32 {
20576 static struct {
20577 unsigned long address;
20578 unsigned short segment;
20579 -} bios32_indirect = { 0, __KERNEL_CS };
20580 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20581
20582 /*
20583 * Returns the entry point for the given service, NULL on error
20584 */
20585
20586 -static unsigned long bios32_service(unsigned long service)
20587 +static unsigned long __devinit bios32_service(unsigned long service)
20588 {
20589 unsigned char return_code; /* %al */
20590 unsigned long address; /* %ebx */
20591 unsigned long length; /* %ecx */
20592 unsigned long entry; /* %edx */
20593 unsigned long flags;
20594 + struct desc_struct d, *gdt;
20595
20596 local_irq_save(flags);
20597 - __asm__("lcall *(%%edi); cld"
20598 +
20599 + gdt = get_cpu_gdt_table(smp_processor_id());
20600 +
20601 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20602 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20603 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20604 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20605 +
20606 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20607 : "=a" (return_code),
20608 "=b" (address),
20609 "=c" (length),
20610 "=d" (entry)
20611 : "0" (service),
20612 "1" (0),
20613 - "D" (&bios32_indirect));
20614 + "D" (&bios32_indirect),
20615 + "r"(__PCIBIOS_DS)
20616 + : "memory");
20617 +
20618 + pax_open_kernel();
20619 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20620 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20621 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20622 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20623 + pax_close_kernel();
20624 +
20625 local_irq_restore(flags);
20626
20627 switch (return_code) {
20628 - case 0:
20629 - return address + entry;
20630 - case 0x80: /* Not present */
20631 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20632 - return 0;
20633 - default: /* Shouldn't happen */
20634 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20635 - service, return_code);
20636 + case 0: {
20637 + int cpu;
20638 + unsigned char flags;
20639 +
20640 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20641 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20642 + printk(KERN_WARNING "bios32_service: not valid\n");
20643 return 0;
20644 + }
20645 + address = address + PAGE_OFFSET;
20646 + length += 16UL; /* some BIOSs underreport this... */
20647 + flags = 4;
20648 + if (length >= 64*1024*1024) {
20649 + length >>= PAGE_SHIFT;
20650 + flags |= 8;
20651 + }
20652 +
20653 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
20654 + gdt = get_cpu_gdt_table(cpu);
20655 + pack_descriptor(&d, address, length, 0x9b, flags);
20656 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20657 + pack_descriptor(&d, address, length, 0x93, flags);
20658 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20659 + }
20660 + return entry;
20661 + }
20662 + case 0x80: /* Not present */
20663 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20664 + return 0;
20665 + default: /* Shouldn't happen */
20666 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20667 + service, return_code);
20668 + return 0;
20669 }
20670 }
20671
20672 static struct {
20673 unsigned long address;
20674 unsigned short segment;
20675 -} pci_indirect = { 0, __KERNEL_CS };
20676 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20677
20678 -static int pci_bios_present;
20679 +static int pci_bios_present __read_only;
20680
20681 static int __devinit check_pcibios(void)
20682 {
20683 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20684 unsigned long flags, pcibios_entry;
20685
20686 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20687 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20688 + pci_indirect.address = pcibios_entry;
20689
20690 local_irq_save(flags);
20691 - __asm__(
20692 - "lcall *(%%edi); cld\n\t"
20693 + __asm__("movw %w6, %%ds\n\t"
20694 + "lcall *%%ss:(%%edi); cld\n\t"
20695 + "push %%ss\n\t"
20696 + "pop %%ds\n\t"
20697 "jc 1f\n\t"
20698 "xor %%ah, %%ah\n"
20699 "1:"
20700 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20701 "=b" (ebx),
20702 "=c" (ecx)
20703 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20704 - "D" (&pci_indirect)
20705 + "D" (&pci_indirect),
20706 + "r" (__PCIBIOS_DS)
20707 : "memory");
20708 local_irq_restore(flags);
20709
20710 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20711
20712 switch (len) {
20713 case 1:
20714 - __asm__("lcall *(%%esi); cld\n\t"
20715 + __asm__("movw %w6, %%ds\n\t"
20716 + "lcall *%%ss:(%%esi); cld\n\t"
20717 + "push %%ss\n\t"
20718 + "pop %%ds\n\t"
20719 "jc 1f\n\t"
20720 "xor %%ah, %%ah\n"
20721 "1:"
20722 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20723 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20724 "b" (bx),
20725 "D" ((long)reg),
20726 - "S" (&pci_indirect));
20727 + "S" (&pci_indirect),
20728 + "r" (__PCIBIOS_DS));
20729 /*
20730 * Zero-extend the result beyond 8 bits, do not trust the
20731 * BIOS having done it:
20732 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20733 *value &= 0xff;
20734 break;
20735 case 2:
20736 - __asm__("lcall *(%%esi); cld\n\t"
20737 + __asm__("movw %w6, %%ds\n\t"
20738 + "lcall *%%ss:(%%esi); cld\n\t"
20739 + "push %%ss\n\t"
20740 + "pop %%ds\n\t"
20741 "jc 1f\n\t"
20742 "xor %%ah, %%ah\n"
20743 "1:"
20744 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20745 : "1" (PCIBIOS_READ_CONFIG_WORD),
20746 "b" (bx),
20747 "D" ((long)reg),
20748 - "S" (&pci_indirect));
20749 + "S" (&pci_indirect),
20750 + "r" (__PCIBIOS_DS));
20751 /*
20752 * Zero-extend the result beyond 16 bits, do not trust the
20753 * BIOS having done it:
20754 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20755 *value &= 0xffff;
20756 break;
20757 case 4:
20758 - __asm__("lcall *(%%esi); cld\n\t"
20759 + __asm__("movw %w6, %%ds\n\t"
20760 + "lcall *%%ss:(%%esi); cld\n\t"
20761 + "push %%ss\n\t"
20762 + "pop %%ds\n\t"
20763 "jc 1f\n\t"
20764 "xor %%ah, %%ah\n"
20765 "1:"
20766 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20767 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20768 "b" (bx),
20769 "D" ((long)reg),
20770 - "S" (&pci_indirect));
20771 + "S" (&pci_indirect),
20772 + "r" (__PCIBIOS_DS));
20773 break;
20774 }
20775
20776 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20777
20778 switch (len) {
20779 case 1:
20780 - __asm__("lcall *(%%esi); cld\n\t"
20781 + __asm__("movw %w6, %%ds\n\t"
20782 + "lcall *%%ss:(%%esi); cld\n\t"
20783 + "push %%ss\n\t"
20784 + "pop %%ds\n\t"
20785 "jc 1f\n\t"
20786 "xor %%ah, %%ah\n"
20787 "1:"
20788 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20789 "c" (value),
20790 "b" (bx),
20791 "D" ((long)reg),
20792 - "S" (&pci_indirect));
20793 + "S" (&pci_indirect),
20794 + "r" (__PCIBIOS_DS));
20795 break;
20796 case 2:
20797 - __asm__("lcall *(%%esi); cld\n\t"
20798 + __asm__("movw %w6, %%ds\n\t"
20799 + "lcall *%%ss:(%%esi); cld\n\t"
20800 + "push %%ss\n\t"
20801 + "pop %%ds\n\t"
20802 "jc 1f\n\t"
20803 "xor %%ah, %%ah\n"
20804 "1:"
20805 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20806 "c" (value),
20807 "b" (bx),
20808 "D" ((long)reg),
20809 - "S" (&pci_indirect));
20810 + "S" (&pci_indirect),
20811 + "r" (__PCIBIOS_DS));
20812 break;
20813 case 4:
20814 - __asm__("lcall *(%%esi); cld\n\t"
20815 + __asm__("movw %w6, %%ds\n\t"
20816 + "lcall *%%ss:(%%esi); cld\n\t"
20817 + "push %%ss\n\t"
20818 + "pop %%ds\n\t"
20819 "jc 1f\n\t"
20820 "xor %%ah, %%ah\n"
20821 "1:"
20822 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20823 "c" (value),
20824 "b" (bx),
20825 "D" ((long)reg),
20826 - "S" (&pci_indirect));
20827 + "S" (&pci_indirect),
20828 + "r" (__PCIBIOS_DS));
20829 break;
20830 }
20831
20832 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20833
20834 DBG("PCI: Fetching IRQ routing table... ");
20835 __asm__("push %%es\n\t"
20836 + "movw %w8, %%ds\n\t"
20837 "push %%ds\n\t"
20838 "pop %%es\n\t"
20839 - "lcall *(%%esi); cld\n\t"
20840 + "lcall *%%ss:(%%esi); cld\n\t"
20841 "pop %%es\n\t"
20842 + "push %%ss\n\t"
20843 + "pop %%ds\n"
20844 "jc 1f\n\t"
20845 "xor %%ah, %%ah\n"
20846 "1:"
20847 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20848 "1" (0),
20849 "D" ((long) &opt),
20850 "S" (&pci_indirect),
20851 - "m" (opt)
20852 + "m" (opt),
20853 + "r" (__PCIBIOS_DS)
20854 : "memory");
20855 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20856 if (ret & 0xff00)
20857 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20858 {
20859 int ret;
20860
20861 - __asm__("lcall *(%%esi); cld\n\t"
20862 + __asm__("movw %w5, %%ds\n\t"
20863 + "lcall *%%ss:(%%esi); cld\n\t"
20864 + "push %%ss\n\t"
20865 + "pop %%ds\n"
20866 "jc 1f\n\t"
20867 "xor %%ah, %%ah\n"
20868 "1:"
20869 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20870 : "0" (PCIBIOS_SET_PCI_HW_INT),
20871 "b" ((dev->bus->number << 8) | dev->devfn),
20872 "c" ((irq << 8) | (pin + 10)),
20873 - "S" (&pci_indirect));
20874 + "S" (&pci_indirect),
20875 + "r" (__PCIBIOS_DS));
20876 return !(ret & 0xff00);
20877 }
20878 EXPORT_SYMBOL(pcibios_set_irq_routing);
20879 diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_32.c linux-3.0.4/arch/x86/platform/efi/efi_32.c
20880 --- linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
20881 +++ linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-08-23 21:47:55.000000000 -0400
20882 @@ -38,70 +38,37 @@
20883 */
20884
20885 static unsigned long efi_rt_eflags;
20886 -static pgd_t efi_bak_pg_dir_pointer[2];
20887 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
20888
20889 -void efi_call_phys_prelog(void)
20890 +void __init efi_call_phys_prelog(void)
20891 {
20892 - unsigned long cr4;
20893 - unsigned long temp;
20894 struct desc_ptr gdt_descr;
20895
20896 local_irq_save(efi_rt_eflags);
20897
20898 - /*
20899 - * If I don't have PAE, I should just duplicate two entries in page
20900 - * directory. If I have PAE, I just need to duplicate one entry in
20901 - * page directory.
20902 - */
20903 - cr4 = read_cr4_safe();
20904 -
20905 - if (cr4 & X86_CR4_PAE) {
20906 - efi_bak_pg_dir_pointer[0].pgd =
20907 - swapper_pg_dir[pgd_index(0)].pgd;
20908 - swapper_pg_dir[0].pgd =
20909 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20910 - } else {
20911 - efi_bak_pg_dir_pointer[0].pgd =
20912 - swapper_pg_dir[pgd_index(0)].pgd;
20913 - efi_bak_pg_dir_pointer[1].pgd =
20914 - swapper_pg_dir[pgd_index(0x400000)].pgd;
20915 - swapper_pg_dir[pgd_index(0)].pgd =
20916 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20917 - temp = PAGE_OFFSET + 0x400000;
20918 - swapper_pg_dir[pgd_index(0x400000)].pgd =
20919 - swapper_pg_dir[pgd_index(temp)].pgd;
20920 - }
20921 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
20922 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20923 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20924
20925 /*
20926 * After the lock is released, the original page table is restored.
20927 */
20928 __flush_tlb_all();
20929
20930 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
20931 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
20932 gdt_descr.size = GDT_SIZE - 1;
20933 load_gdt(&gdt_descr);
20934 }
20935
20936 -void efi_call_phys_epilog(void)
20937 +void __init efi_call_phys_epilog(void)
20938 {
20939 - unsigned long cr4;
20940 struct desc_ptr gdt_descr;
20941
20942 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
20943 + gdt_descr.address = get_cpu_gdt_table(0);
20944 gdt_descr.size = GDT_SIZE - 1;
20945 load_gdt(&gdt_descr);
20946
20947 - cr4 = read_cr4_safe();
20948 -
20949 - if (cr4 & X86_CR4_PAE) {
20950 - swapper_pg_dir[pgd_index(0)].pgd =
20951 - efi_bak_pg_dir_pointer[0].pgd;
20952 - } else {
20953 - swapper_pg_dir[pgd_index(0)].pgd =
20954 - efi_bak_pg_dir_pointer[0].pgd;
20955 - swapper_pg_dir[pgd_index(0x400000)].pgd =
20956 - efi_bak_pg_dir_pointer[1].pgd;
20957 - }
20958 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
20959
20960 /*
20961 * After the lock is released, the original page table is restored.
20962 diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S
20963 --- linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
20964 +++ linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-08-23 21:47:55.000000000 -0400
20965 @@ -6,6 +6,7 @@
20966 */
20967
20968 #include <linux/linkage.h>
20969 +#include <linux/init.h>
20970 #include <asm/page_types.h>
20971
20972 /*
20973 @@ -20,7 +21,7 @@
20974 * service functions will comply with gcc calling convention, too.
20975 */
20976
20977 -.text
20978 +__INIT
20979 ENTRY(efi_call_phys)
20980 /*
20981 * 0. The function can only be called in Linux kernel. So CS has been
20982 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
20983 * The mapping of lower virtual memory has been created in prelog and
20984 * epilog.
20985 */
20986 - movl $1f, %edx
20987 - subl $__PAGE_OFFSET, %edx
20988 - jmp *%edx
20989 + jmp 1f-__PAGE_OFFSET
20990 1:
20991
20992 /*
20993 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
20994 * parameter 2, ..., param n. To make things easy, we save the return
20995 * address of efi_call_phys in a global variable.
20996 */
20997 - popl %edx
20998 - movl %edx, saved_return_addr
20999 - /* get the function pointer into ECX*/
21000 - popl %ecx
21001 - movl %ecx, efi_rt_function_ptr
21002 - movl $2f, %edx
21003 - subl $__PAGE_OFFSET, %edx
21004 - pushl %edx
21005 + popl (saved_return_addr)
21006 + popl (efi_rt_function_ptr)
21007
21008 /*
21009 * 3. Clear PG bit in %CR0.
21010 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21011 /*
21012 * 5. Call the physical function.
21013 */
21014 - jmp *%ecx
21015 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
21016
21017 -2:
21018 /*
21019 * 6. After EFI runtime service returns, control will return to
21020 * following instruction. We'd better readjust stack pointer first.
21021 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21022 movl %cr0, %edx
21023 orl $0x80000000, %edx
21024 movl %edx, %cr0
21025 - jmp 1f
21026 -1:
21027 +
21028 /*
21029 * 8. Now restore the virtual mode from flat mode by
21030 * adding EIP with PAGE_OFFSET.
21031 */
21032 - movl $1f, %edx
21033 - jmp *%edx
21034 + jmp 1f+__PAGE_OFFSET
21035 1:
21036
21037 /*
21038 * 9. Balance the stack. And because EAX contain the return value,
21039 * we'd better not clobber it.
21040 */
21041 - leal efi_rt_function_ptr, %edx
21042 - movl (%edx), %ecx
21043 - pushl %ecx
21044 + pushl (efi_rt_function_ptr)
21045
21046 /*
21047 - * 10. Push the saved return address onto the stack and return.
21048 + * 10. Return to the saved return address.
21049 */
21050 - leal saved_return_addr, %edx
21051 - movl (%edx), %ecx
21052 - pushl %ecx
21053 - ret
21054 + jmpl *(saved_return_addr)
21055 ENDPROC(efi_call_phys)
21056 .previous
21057
21058 -.data
21059 +__INITDATA
21060 saved_return_addr:
21061 .long 0
21062 efi_rt_function_ptr:
21063 diff -urNp linux-3.0.4/arch/x86/platform/mrst/mrst.c linux-3.0.4/arch/x86/platform/mrst/mrst.c
21064 --- linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
21065 +++ linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
21066 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21067 }
21068
21069 /* Reboot and power off are handled by the SCU on a MID device */
21070 -static void mrst_power_off(void)
21071 +static __noreturn void mrst_power_off(void)
21072 {
21073 intel_scu_ipc_simple_command(0xf1, 1);
21074 + BUG();
21075 }
21076
21077 -static void mrst_reboot(void)
21078 +static __noreturn void mrst_reboot(void)
21079 {
21080 intel_scu_ipc_simple_command(0xf1, 0);
21081 + BUG();
21082 }
21083
21084 /*
21085 diff -urNp linux-3.0.4/arch/x86/platform/olpc/olpc_dt.c linux-3.0.4/arch/x86/platform/olpc/olpc_dt.c
21086 --- linux-3.0.4/arch/x86/platform/olpc/olpc_dt.c 2011-07-21 22:17:23.000000000 -0400
21087 +++ linux-3.0.4/arch/x86/platform/olpc/olpc_dt.c 2011-08-29 22:31:19.000000000 -0400
21088 @@ -163,7 +163,7 @@ static struct of_pdt_ops prom_olpc_ops _
21089 .getchild = olpc_dt_getchild,
21090 .getsibling = olpc_dt_getsibling,
21091 .pkg2path = olpc_dt_pkg2path,
21092 -};
21093 +} __no_const;
21094
21095 void __init olpc_dt_build_devicetree(void)
21096 {
21097 diff -urNp linux-3.0.4/arch/x86/platform/uv/tlb_uv.c linux-3.0.4/arch/x86/platform/uv/tlb_uv.c
21098 --- linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
21099 +++ linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
21100 @@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
21101 cpumask_t mask;
21102 struct reset_args reset_args;
21103
21104 + pax_track_stack();
21105 +
21106 reset_args.sender = sender;
21107 cpus_clear(mask);
21108 /* find a single cpu for each uvhub in this distribution mask */
21109 diff -urNp linux-3.0.4/arch/x86/power/cpu.c linux-3.0.4/arch/x86/power/cpu.c
21110 --- linux-3.0.4/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
21111 +++ linux-3.0.4/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
21112 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
21113 static void fix_processor_context(void)
21114 {
21115 int cpu = smp_processor_id();
21116 - struct tss_struct *t = &per_cpu(init_tss, cpu);
21117 + struct tss_struct *t = init_tss + cpu;
21118
21119 set_tss_desc(cpu, t); /*
21120 * This just modifies memory; should not be
21121 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
21122 */
21123
21124 #ifdef CONFIG_X86_64
21125 + pax_open_kernel();
21126 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21127 + pax_close_kernel();
21128
21129 syscall_init(); /* This sets MSR_*STAR and related */
21130 #endif
21131 diff -urNp linux-3.0.4/arch/x86/vdso/Makefile linux-3.0.4/arch/x86/vdso/Makefile
21132 --- linux-3.0.4/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
21133 +++ linux-3.0.4/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
21134 @@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
21135 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21136 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21137
21138 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21139 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21140 GCOV_PROFILE := n
21141
21142 #
21143 diff -urNp linux-3.0.4/arch/x86/vdso/vdso32-setup.c linux-3.0.4/arch/x86/vdso/vdso32-setup.c
21144 --- linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
21145 +++ linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
21146 @@ -25,6 +25,7 @@
21147 #include <asm/tlbflush.h>
21148 #include <asm/vdso.h>
21149 #include <asm/proto.h>
21150 +#include <asm/mman.h>
21151
21152 enum {
21153 VDSO_DISABLED = 0,
21154 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21155 void enable_sep_cpu(void)
21156 {
21157 int cpu = get_cpu();
21158 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
21159 + struct tss_struct *tss = init_tss + cpu;
21160
21161 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21162 put_cpu();
21163 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21164 gate_vma.vm_start = FIXADDR_USER_START;
21165 gate_vma.vm_end = FIXADDR_USER_END;
21166 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21167 - gate_vma.vm_page_prot = __P101;
21168 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21169 /*
21170 * Make sure the vDSO gets into every core dump.
21171 * Dumping its contents makes post-mortem fully interpretable later
21172 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21173 if (compat)
21174 addr = VDSO_HIGH_BASE;
21175 else {
21176 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21177 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21178 if (IS_ERR_VALUE(addr)) {
21179 ret = addr;
21180 goto up_fail;
21181 }
21182 }
21183
21184 - current->mm->context.vdso = (void *)addr;
21185 + current->mm->context.vdso = addr;
21186
21187 if (compat_uses_vma || !compat) {
21188 /*
21189 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21190 }
21191
21192 current_thread_info()->sysenter_return =
21193 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21194 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21195
21196 up_fail:
21197 if (ret)
21198 - current->mm->context.vdso = NULL;
21199 + current->mm->context.vdso = 0;
21200
21201 up_write(&mm->mmap_sem);
21202
21203 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21204
21205 const char *arch_vma_name(struct vm_area_struct *vma)
21206 {
21207 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21208 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21209 return "[vdso]";
21210 +
21211 +#ifdef CONFIG_PAX_SEGMEXEC
21212 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21213 + return "[vdso]";
21214 +#endif
21215 +
21216 return NULL;
21217 }
21218
21219 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21220 * Check to see if the corresponding task was created in compat vdso
21221 * mode.
21222 */
21223 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21224 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21225 return &gate_vma;
21226 return NULL;
21227 }
21228 diff -urNp linux-3.0.4/arch/x86/vdso/vma.c linux-3.0.4/arch/x86/vdso/vma.c
21229 --- linux-3.0.4/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
21230 +++ linux-3.0.4/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
21231 @@ -15,18 +15,19 @@
21232 #include <asm/proto.h>
21233 #include <asm/vdso.h>
21234
21235 -unsigned int __read_mostly vdso_enabled = 1;
21236 -
21237 extern char vdso_start[], vdso_end[];
21238 extern unsigned short vdso_sync_cpuid;
21239 +extern char __vsyscall_0;
21240
21241 static struct page **vdso_pages;
21242 +static struct page *vsyscall_page;
21243 static unsigned vdso_size;
21244
21245 static int __init init_vdso_vars(void)
21246 {
21247 - int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
21248 - int i;
21249 + size_t nbytes = vdso_end - vdso_start;
21250 + size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
21251 + size_t i;
21252
21253 vdso_size = npages << PAGE_SHIFT;
21254 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
21255 @@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
21256 goto oom;
21257 for (i = 0; i < npages; i++) {
21258 struct page *p;
21259 - p = alloc_page(GFP_KERNEL);
21260 + p = alloc_page(GFP_KERNEL | __GFP_ZERO);
21261 if (!p)
21262 goto oom;
21263 vdso_pages[i] = p;
21264 - copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
21265 + memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
21266 + nbytes -= PAGE_SIZE;
21267 }
21268 + vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
21269
21270 return 0;
21271
21272 oom:
21273 - printk("Cannot allocate vdso\n");
21274 - vdso_enabled = 0;
21275 - return -ENOMEM;
21276 + panic("Cannot allocate vdso\n");
21277 }
21278 subsys_initcall(init_vdso_vars);
21279
21280 @@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
21281 unsigned long addr;
21282 int ret;
21283
21284 - if (!vdso_enabled)
21285 - return 0;
21286 -
21287 down_write(&mm->mmap_sem);
21288 - addr = vdso_addr(mm->start_stack, vdso_size);
21289 - addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
21290 + addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
21291 + addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
21292 if (IS_ERR_VALUE(addr)) {
21293 ret = addr;
21294 goto up_fail;
21295 }
21296
21297 - current->mm->context.vdso = (void *)addr;
21298 + mm->context.vdso = addr + PAGE_SIZE;
21299
21300 - ret = install_special_mapping(mm, addr, vdso_size,
21301 + ret = install_special_mapping(mm, addr, PAGE_SIZE,
21302 VM_READ|VM_EXEC|
21303 - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21304 + VM_MAYREAD|VM_MAYEXEC|
21305 VM_ALWAYSDUMP,
21306 - vdso_pages);
21307 + &vsyscall_page);
21308 if (ret) {
21309 - current->mm->context.vdso = NULL;
21310 + mm->context.vdso = 0;
21311 goto up_fail;
21312 }
21313
21314 + ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
21315 + VM_READ|VM_EXEC|
21316 + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21317 + VM_ALWAYSDUMP,
21318 + vdso_pages);
21319 + if (ret)
21320 + mm->context.vdso = 0;
21321 +
21322 up_fail:
21323 up_write(&mm->mmap_sem);
21324 return ret;
21325 }
21326 -
21327 -static __init int vdso_setup(char *s)
21328 -{
21329 - vdso_enabled = simple_strtoul(s, NULL, 0);
21330 - return 0;
21331 -}
21332 -__setup("vdso=", vdso_setup);
21333 diff -urNp linux-3.0.4/arch/x86/xen/enlighten.c linux-3.0.4/arch/x86/xen/enlighten.c
21334 --- linux-3.0.4/arch/x86/xen/enlighten.c 2011-08-29 23:26:13.000000000 -0400
21335 +++ linux-3.0.4/arch/x86/xen/enlighten.c 2011-08-29 23:26:21.000000000 -0400
21336 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21337
21338 struct shared_info xen_dummy_shared_info;
21339
21340 -void *xen_initial_gdt;
21341 -
21342 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21343 __read_mostly int xen_have_vector_callback;
21344 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21345 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21346 #endif
21347 };
21348
21349 -static void xen_reboot(int reason)
21350 +static __noreturn void xen_reboot(int reason)
21351 {
21352 struct sched_shutdown r = { .reason = reason };
21353
21354 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21355 BUG();
21356 }
21357
21358 -static void xen_restart(char *msg)
21359 +static __noreturn void xen_restart(char *msg)
21360 {
21361 xen_reboot(SHUTDOWN_reboot);
21362 }
21363
21364 -static void xen_emergency_restart(void)
21365 +static __noreturn void xen_emergency_restart(void)
21366 {
21367 xen_reboot(SHUTDOWN_reboot);
21368 }
21369
21370 -static void xen_machine_halt(void)
21371 +static __noreturn void xen_machine_halt(void)
21372 {
21373 xen_reboot(SHUTDOWN_poweroff);
21374 }
21375 @@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
21376 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21377
21378 /* Work out if we support NX */
21379 - x86_configure_nx();
21380 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21381 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21382 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21383 + unsigned l, h;
21384 +
21385 + __supported_pte_mask |= _PAGE_NX;
21386 + rdmsr(MSR_EFER, l, h);
21387 + l |= EFER_NX;
21388 + wrmsr(MSR_EFER, l, h);
21389 + }
21390 +#endif
21391
21392 xen_setup_features();
21393
21394 @@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
21395
21396 machine_ops = xen_machine_ops;
21397
21398 - /*
21399 - * The only reliable way to retain the initial address of the
21400 - * percpu gdt_page is to remember it here, so we can go and
21401 - * mark it RW later, when the initial percpu area is freed.
21402 - */
21403 - xen_initial_gdt = &per_cpu(gdt_page, 0);
21404 -
21405 xen_smp_init();
21406
21407 #ifdef CONFIG_ACPI_NUMA
21408 diff -urNp linux-3.0.4/arch/x86/xen/mmu.c linux-3.0.4/arch/x86/xen/mmu.c
21409 --- linux-3.0.4/arch/x86/xen/mmu.c 2011-08-29 23:26:13.000000000 -0400
21410 +++ linux-3.0.4/arch/x86/xen/mmu.c 2011-08-29 23:26:21.000000000 -0400
21411 @@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
21412 convert_pfn_mfn(init_level4_pgt);
21413 convert_pfn_mfn(level3_ident_pgt);
21414 convert_pfn_mfn(level3_kernel_pgt);
21415 + convert_pfn_mfn(level3_vmalloc_pgt);
21416 + convert_pfn_mfn(level3_vmemmap_pgt);
21417
21418 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21419 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21420 @@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
21421 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21422 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21423 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21424 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21425 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21426 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21427 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21428 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21429 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21430
21431 @@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
21432 pv_mmu_ops.set_pud = xen_set_pud;
21433 #if PAGETABLE_LEVELS == 4
21434 pv_mmu_ops.set_pgd = xen_set_pgd;
21435 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
21436 #endif
21437
21438 /* This will work as long as patching hasn't happened yet
21439 @@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
21440 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
21441 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
21442 .set_pgd = xen_set_pgd_hyper,
21443 + .set_pgd_batched = xen_set_pgd_hyper,
21444
21445 .alloc_pud = xen_alloc_pmd_init,
21446 .release_pud = xen_release_pmd_init,
21447 diff -urNp linux-3.0.4/arch/x86/xen/smp.c linux-3.0.4/arch/x86/xen/smp.c
21448 --- linux-3.0.4/arch/x86/xen/smp.c 2011-08-29 23:26:13.000000000 -0400
21449 +++ linux-3.0.4/arch/x86/xen/smp.c 2011-08-29 23:26:21.000000000 -0400
21450 @@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
21451 {
21452 BUG_ON(smp_processor_id() != 0);
21453 native_smp_prepare_boot_cpu();
21454 -
21455 - /* We've switched to the "real" per-cpu gdt, so make sure the
21456 - old memory can be recycled */
21457 - make_lowmem_page_readwrite(xen_initial_gdt);
21458 -
21459 xen_filter_cpu_maps();
21460 xen_setup_vcpu_info_placement();
21461 }
21462 @@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
21463 gdt = get_cpu_gdt_table(cpu);
21464
21465 ctxt->flags = VGCF_IN_KERNEL;
21466 - ctxt->user_regs.ds = __USER_DS;
21467 - ctxt->user_regs.es = __USER_DS;
21468 + ctxt->user_regs.ds = __KERNEL_DS;
21469 + ctxt->user_regs.es = __KERNEL_DS;
21470 ctxt->user_regs.ss = __KERNEL_DS;
21471 #ifdef CONFIG_X86_32
21472 ctxt->user_regs.fs = __KERNEL_PERCPU;
21473 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21474 + savesegment(gs, ctxt->user_regs.gs);
21475 #else
21476 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21477 #endif
21478 @@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
21479 int rc;
21480
21481 per_cpu(current_task, cpu) = idle;
21482 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
21483 #ifdef CONFIG_X86_32
21484 irq_ctx_init(cpu);
21485 #else
21486 clear_tsk_thread_flag(idle, TIF_FORK);
21487 - per_cpu(kernel_stack, cpu) =
21488 - (unsigned long)task_stack_page(idle) -
21489 - KERNEL_STACK_OFFSET + THREAD_SIZE;
21490 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21491 #endif
21492 xen_setup_runstate_info(cpu);
21493 xen_setup_timer(cpu);
21494 diff -urNp linux-3.0.4/arch/x86/xen/xen-asm_32.S linux-3.0.4/arch/x86/xen/xen-asm_32.S
21495 --- linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-07-21 22:17:23.000000000 -0400
21496 +++ linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-08-23 21:47:55.000000000 -0400
21497 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
21498 ESP_OFFSET=4 # bytes pushed onto stack
21499
21500 /*
21501 - * Store vcpu_info pointer for easy access. Do it this way to
21502 - * avoid having to reload %fs
21503 + * Store vcpu_info pointer for easy access.
21504 */
21505 #ifdef CONFIG_SMP
21506 - GET_THREAD_INFO(%eax)
21507 - movl TI_cpu(%eax), %eax
21508 - movl __per_cpu_offset(,%eax,4), %eax
21509 - mov xen_vcpu(%eax), %eax
21510 + push %fs
21511 + mov $(__KERNEL_PERCPU), %eax
21512 + mov %eax, %fs
21513 + mov PER_CPU_VAR(xen_vcpu), %eax
21514 + pop %fs
21515 #else
21516 movl xen_vcpu, %eax
21517 #endif
21518 diff -urNp linux-3.0.4/arch/x86/xen/xen-head.S linux-3.0.4/arch/x86/xen/xen-head.S
21519 --- linux-3.0.4/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
21520 +++ linux-3.0.4/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
21521 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
21522 #ifdef CONFIG_X86_32
21523 mov %esi,xen_start_info
21524 mov $init_thread_union+THREAD_SIZE,%esp
21525 +#ifdef CONFIG_SMP
21526 + movl $cpu_gdt_table,%edi
21527 + movl $__per_cpu_load,%eax
21528 + movw %ax,__KERNEL_PERCPU + 2(%edi)
21529 + rorl $16,%eax
21530 + movb %al,__KERNEL_PERCPU + 4(%edi)
21531 + movb %ah,__KERNEL_PERCPU + 7(%edi)
21532 + movl $__per_cpu_end - 1,%eax
21533 + subl $__per_cpu_start,%eax
21534 + movw %ax,__KERNEL_PERCPU + 0(%edi)
21535 +#endif
21536 #else
21537 mov %rsi,xen_start_info
21538 mov $init_thread_union+THREAD_SIZE,%rsp
21539 diff -urNp linux-3.0.4/arch/x86/xen/xen-ops.h linux-3.0.4/arch/x86/xen/xen-ops.h
21540 --- linux-3.0.4/arch/x86/xen/xen-ops.h 2011-08-23 21:44:40.000000000 -0400
21541 +++ linux-3.0.4/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
21542 @@ -10,8 +10,6 @@
21543 extern const char xen_hypervisor_callback[];
21544 extern const char xen_failsafe_callback[];
21545
21546 -extern void *xen_initial_gdt;
21547 -
21548 struct trap_info;
21549 void xen_copy_trap_info(struct trap_info *traps);
21550
21551 diff -urNp linux-3.0.4/block/blk-iopoll.c linux-3.0.4/block/blk-iopoll.c
21552 --- linux-3.0.4/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
21553 +++ linux-3.0.4/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
21554 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21555 }
21556 EXPORT_SYMBOL(blk_iopoll_complete);
21557
21558 -static void blk_iopoll_softirq(struct softirq_action *h)
21559 +static void blk_iopoll_softirq(void)
21560 {
21561 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21562 int rearm = 0, budget = blk_iopoll_budget;
21563 diff -urNp linux-3.0.4/block/blk-map.c linux-3.0.4/block/blk-map.c
21564 --- linux-3.0.4/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
21565 +++ linux-3.0.4/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
21566 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21567 if (!len || !kbuf)
21568 return -EINVAL;
21569
21570 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21571 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21572 if (do_copy)
21573 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21574 else
21575 diff -urNp linux-3.0.4/block/blk-softirq.c linux-3.0.4/block/blk-softirq.c
21576 --- linux-3.0.4/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
21577 +++ linux-3.0.4/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
21578 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21579 * Softirq action handler - move entries to local list and loop over them
21580 * while passing them to the queue registered handler.
21581 */
21582 -static void blk_done_softirq(struct softirq_action *h)
21583 +static void blk_done_softirq(void)
21584 {
21585 struct list_head *cpu_list, local_list;
21586
21587 diff -urNp linux-3.0.4/block/bsg.c linux-3.0.4/block/bsg.c
21588 --- linux-3.0.4/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
21589 +++ linux-3.0.4/block/bsg.c 2011-08-23 21:47:55.000000000 -0400
21590 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21591 struct sg_io_v4 *hdr, struct bsg_device *bd,
21592 fmode_t has_write_perm)
21593 {
21594 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21595 + unsigned char *cmdptr;
21596 +
21597 if (hdr->request_len > BLK_MAX_CDB) {
21598 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21599 if (!rq->cmd)
21600 return -ENOMEM;
21601 - }
21602 + cmdptr = rq->cmd;
21603 + } else
21604 + cmdptr = tmpcmd;
21605
21606 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21607 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21608 hdr->request_len))
21609 return -EFAULT;
21610
21611 + if (cmdptr != rq->cmd)
21612 + memcpy(rq->cmd, cmdptr, hdr->request_len);
21613 +
21614 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21615 if (blk_verify_command(rq->cmd, has_write_perm))
21616 return -EPERM;
21617 diff -urNp linux-3.0.4/block/scsi_ioctl.c linux-3.0.4/block/scsi_ioctl.c
21618 --- linux-3.0.4/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
21619 +++ linux-3.0.4/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
21620 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21621 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21622 struct sg_io_hdr *hdr, fmode_t mode)
21623 {
21624 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21625 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21626 + unsigned char *cmdptr;
21627 +
21628 + if (rq->cmd != rq->__cmd)
21629 + cmdptr = rq->cmd;
21630 + else
21631 + cmdptr = tmpcmd;
21632 +
21633 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21634 return -EFAULT;
21635 +
21636 + if (cmdptr != rq->cmd)
21637 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21638 +
21639 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21640 return -EPERM;
21641
21642 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21643 int err;
21644 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21645 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21646 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21647 + unsigned char *cmdptr;
21648
21649 if (!sic)
21650 return -EINVAL;
21651 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21652 */
21653 err = -EFAULT;
21654 rq->cmd_len = cmdlen;
21655 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
21656 +
21657 + if (rq->cmd != rq->__cmd)
21658 + cmdptr = rq->cmd;
21659 + else
21660 + cmdptr = tmpcmd;
21661 +
21662 + if (copy_from_user(cmdptr, sic->data, cmdlen))
21663 goto error;
21664
21665 + if (rq->cmd != cmdptr)
21666 + memcpy(rq->cmd, cmdptr, cmdlen);
21667 +
21668 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21669 goto error;
21670
21671 diff -urNp linux-3.0.4/crypto/cryptd.c linux-3.0.4/crypto/cryptd.c
21672 --- linux-3.0.4/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
21673 +++ linux-3.0.4/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
21674 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21675
21676 struct cryptd_blkcipher_request_ctx {
21677 crypto_completion_t complete;
21678 -};
21679 +} __no_const;
21680
21681 struct cryptd_hash_ctx {
21682 struct crypto_shash *child;
21683 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21684
21685 struct cryptd_aead_request_ctx {
21686 crypto_completion_t complete;
21687 -};
21688 +} __no_const;
21689
21690 static void cryptd_queue_worker(struct work_struct *work);
21691
21692 diff -urNp linux-3.0.4/crypto/gf128mul.c linux-3.0.4/crypto/gf128mul.c
21693 --- linux-3.0.4/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
21694 +++ linux-3.0.4/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
21695 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21696 for (i = 0; i < 7; ++i)
21697 gf128mul_x_lle(&p[i + 1], &p[i]);
21698
21699 - memset(r, 0, sizeof(r));
21700 + memset(r, 0, sizeof(*r));
21701 for (i = 0;;) {
21702 u8 ch = ((u8 *)b)[15 - i];
21703
21704 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21705 for (i = 0; i < 7; ++i)
21706 gf128mul_x_bbe(&p[i + 1], &p[i]);
21707
21708 - memset(r, 0, sizeof(r));
21709 + memset(r, 0, sizeof(*r));
21710 for (i = 0;;) {
21711 u8 ch = ((u8 *)b)[i];
21712
21713 diff -urNp linux-3.0.4/crypto/serpent.c linux-3.0.4/crypto/serpent.c
21714 --- linux-3.0.4/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
21715 +++ linux-3.0.4/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
21716 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21717 u32 r0,r1,r2,r3,r4;
21718 int i;
21719
21720 + pax_track_stack();
21721 +
21722 /* Copy key, add padding */
21723
21724 for (i = 0; i < keylen; ++i)
21725 diff -urNp linux-3.0.4/Documentation/dontdiff linux-3.0.4/Documentation/dontdiff
21726 --- linux-3.0.4/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
21727 +++ linux-3.0.4/Documentation/dontdiff 2011-08-23 21:47:55.000000000 -0400
21728 @@ -5,6 +5,7 @@
21729 *.cis
21730 *.cpio
21731 *.csp
21732 +*.dbg
21733 *.dsp
21734 *.dvi
21735 *.elf
21736 @@ -48,9 +49,11 @@
21737 *.tab.h
21738 *.tex
21739 *.ver
21740 +*.vim
21741 *.xml
21742 *.xz
21743 *_MODULES
21744 +*_reg_safe.h
21745 *_vga16.c
21746 *~
21747 \#*#
21748 @@ -70,6 +73,7 @@ Kerntypes
21749 Module.markers
21750 Module.symvers
21751 PENDING
21752 +PERF*
21753 SCCS
21754 System.map*
21755 TAGS
21756 @@ -98,6 +102,8 @@ bzImage*
21757 capability_names.h
21758 capflags.c
21759 classlist.h*
21760 +clut_vga16.c
21761 +common-cmds.h
21762 comp*.log
21763 compile.h*
21764 conf
21765 @@ -126,12 +132,14 @@ fore200e_pca_fw.c*
21766 gconf
21767 gconf.glade.h
21768 gen-devlist
21769 +gen-kdb_cmds.c
21770 gen_crc32table
21771 gen_init_cpio
21772 generated
21773 genheaders
21774 genksyms
21775 *_gray256.c
21776 +hash
21777 hpet_example
21778 hugepage-mmap
21779 hugepage-shm
21780 @@ -146,7 +154,6 @@ int32.c
21781 int4.c
21782 int8.c
21783 kallsyms
21784 -kconfig
21785 keywords.c
21786 ksym.c*
21787 ksym.h*
21788 @@ -154,7 +161,6 @@ kxgettext
21789 lkc_defs.h
21790 lex.c
21791 lex.*.c
21792 -linux
21793 logo_*.c
21794 logo_*_clut224.c
21795 logo_*_mono.c
21796 @@ -174,6 +180,7 @@ mkboot
21797 mkbugboot
21798 mkcpustr
21799 mkdep
21800 +mkpiggy
21801 mkprep
21802 mkregtable
21803 mktables
21804 @@ -209,6 +216,7 @@ r300_reg_safe.h
21805 r420_reg_safe.h
21806 r600_reg_safe.h
21807 recordmcount
21808 +regdb.c
21809 relocs
21810 rlim_names.h
21811 rn50_reg_safe.h
21812 @@ -219,6 +227,7 @@ setup
21813 setup.bin
21814 setup.elf
21815 sImage
21816 +slabinfo
21817 sm_tbl*
21818 split-include
21819 syscalltab.h
21820 @@ -246,7 +255,9 @@ vmlinux
21821 vmlinux-*
21822 vmlinux.aout
21823 vmlinux.bin.all
21824 +vmlinux.bin.bz2
21825 vmlinux.lds
21826 +vmlinux.relocs
21827 vmlinuz
21828 voffset.h
21829 vsyscall.lds
21830 @@ -254,6 +265,7 @@ vsyscall_32.lds
21831 wanxlfw.inc
21832 uImage
21833 unifdef
21834 +utsrelease.h
21835 wakeup.bin
21836 wakeup.elf
21837 wakeup.lds
21838 diff -urNp linux-3.0.4/Documentation/kernel-parameters.txt linux-3.0.4/Documentation/kernel-parameters.txt
21839 --- linux-3.0.4/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
21840 +++ linux-3.0.4/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
21841 @@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
21842 the specified number of seconds. This is to be used if
21843 your oopses keep scrolling off the screen.
21844
21845 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
21846 + virtualization environments that don't cope well with the
21847 + expand down segment used by UDEREF on X86-32 or the frequent
21848 + page table updates on X86-64.
21849 +
21850 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
21851 +
21852 pcbit= [HW,ISDN]
21853
21854 pcd. [PARIDE]
21855 diff -urNp linux-3.0.4/drivers/acpi/apei/cper.c linux-3.0.4/drivers/acpi/apei/cper.c
21856 --- linux-3.0.4/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
21857 +++ linux-3.0.4/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
21858 @@ -38,12 +38,12 @@
21859 */
21860 u64 cper_next_record_id(void)
21861 {
21862 - static atomic64_t seq;
21863 + static atomic64_unchecked_t seq;
21864
21865 - if (!atomic64_read(&seq))
21866 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
21867 + if (!atomic64_read_unchecked(&seq))
21868 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
21869
21870 - return atomic64_inc_return(&seq);
21871 + return atomic64_inc_return_unchecked(&seq);
21872 }
21873 EXPORT_SYMBOL_GPL(cper_next_record_id);
21874
21875 diff -urNp linux-3.0.4/drivers/acpi/ec_sys.c linux-3.0.4/drivers/acpi/ec_sys.c
21876 --- linux-3.0.4/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
21877 +++ linux-3.0.4/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
21878 @@ -11,6 +11,7 @@
21879 #include <linux/kernel.h>
21880 #include <linux/acpi.h>
21881 #include <linux/debugfs.h>
21882 +#include <asm/uaccess.h>
21883 #include "internal.h"
21884
21885 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
21886 @@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
21887 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
21888 */
21889 unsigned int size = EC_SPACE_SIZE;
21890 - u8 *data = (u8 *) buf;
21891 + u8 data;
21892 loff_t init_off = *off;
21893 int err = 0;
21894
21895 @@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
21896 size = count;
21897
21898 while (size) {
21899 - err = ec_read(*off, &data[*off - init_off]);
21900 + err = ec_read(*off, &data);
21901 if (err)
21902 return err;
21903 + if (put_user(data, &buf[*off - init_off]))
21904 + return -EFAULT;
21905 *off += 1;
21906 size--;
21907 }
21908 @@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
21909
21910 unsigned int size = count;
21911 loff_t init_off = *off;
21912 - u8 *data = (u8 *) buf;
21913 int err = 0;
21914
21915 if (*off >= EC_SPACE_SIZE)
21916 @@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
21917 }
21918
21919 while (size) {
21920 - u8 byte_write = data[*off - init_off];
21921 + u8 byte_write;
21922 + if (get_user(byte_write, &buf[*off - init_off]))
21923 + return -EFAULT;
21924 err = ec_write(*off, byte_write);
21925 if (err)
21926 return err;
21927 diff -urNp linux-3.0.4/drivers/acpi/proc.c linux-3.0.4/drivers/acpi/proc.c
21928 --- linux-3.0.4/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
21929 +++ linux-3.0.4/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
21930 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
21931 size_t count, loff_t * ppos)
21932 {
21933 struct list_head *node, *next;
21934 - char strbuf[5];
21935 - char str[5] = "";
21936 - unsigned int len = count;
21937 -
21938 - if (len > 4)
21939 - len = 4;
21940 - if (len < 0)
21941 - return -EFAULT;
21942 + char strbuf[5] = {0};
21943
21944 - if (copy_from_user(strbuf, buffer, len))
21945 + if (count > 4)
21946 + count = 4;
21947 + if (copy_from_user(strbuf, buffer, count))
21948 return -EFAULT;
21949 - strbuf[len] = '\0';
21950 - sscanf(strbuf, "%s", str);
21951 + strbuf[count] = '\0';
21952
21953 mutex_lock(&acpi_device_lock);
21954 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
21955 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
21956 if (!dev->wakeup.flags.valid)
21957 continue;
21958
21959 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
21960 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
21961 if (device_can_wakeup(&dev->dev)) {
21962 bool enable = !device_may_wakeup(&dev->dev);
21963 device_set_wakeup_enable(&dev->dev, enable);
21964 diff -urNp linux-3.0.4/drivers/acpi/processor_driver.c linux-3.0.4/drivers/acpi/processor_driver.c
21965 --- linux-3.0.4/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
21966 +++ linux-3.0.4/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
21967 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
21968 return 0;
21969 #endif
21970
21971 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
21972 + BUG_ON(pr->id >= nr_cpu_ids);
21973
21974 /*
21975 * Buggy BIOS check
21976 diff -urNp linux-3.0.4/drivers/ata/libata-core.c linux-3.0.4/drivers/ata/libata-core.c
21977 --- linux-3.0.4/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
21978 +++ linux-3.0.4/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
21979 @@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
21980 struct ata_port *ap;
21981 unsigned int tag;
21982
21983 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21984 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21985 ap = qc->ap;
21986
21987 qc->flags = 0;
21988 @@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
21989 struct ata_port *ap;
21990 struct ata_link *link;
21991
21992 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21993 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21994 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
21995 ap = qc->ap;
21996 link = qc->dev->link;
21997 @@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
21998 return;
21999
22000 spin_lock(&lock);
22001 + pax_open_kernel();
22002
22003 for (cur = ops->inherits; cur; cur = cur->inherits) {
22004 void **inherit = (void **)cur;
22005 @@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
22006 if (IS_ERR(*pp))
22007 *pp = NULL;
22008
22009 - ops->inherits = NULL;
22010 + *(struct ata_port_operations **)&ops->inherits = NULL;
22011
22012 + pax_close_kernel();
22013 spin_unlock(&lock);
22014 }
22015
22016 diff -urNp linux-3.0.4/drivers/ata/libata-eh.c linux-3.0.4/drivers/ata/libata-eh.c
22017 --- linux-3.0.4/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
22018 +++ linux-3.0.4/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
22019 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22020 {
22021 struct ata_link *link;
22022
22023 + pax_track_stack();
22024 +
22025 ata_for_each_link(link, ap, HOST_FIRST)
22026 ata_eh_link_report(link);
22027 }
22028 diff -urNp linux-3.0.4/drivers/ata/pata_arasan_cf.c linux-3.0.4/drivers/ata/pata_arasan_cf.c
22029 --- linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
22030 +++ linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
22031 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22032 /* Handle platform specific quirks */
22033 if (pdata->quirk) {
22034 if (pdata->quirk & CF_BROKEN_PIO) {
22035 - ap->ops->set_piomode = NULL;
22036 + pax_open_kernel();
22037 + *(void **)&ap->ops->set_piomode = NULL;
22038 + pax_close_kernel();
22039 ap->pio_mask = 0;
22040 }
22041 if (pdata->quirk & CF_BROKEN_MWDMA)
22042 diff -urNp linux-3.0.4/drivers/atm/adummy.c linux-3.0.4/drivers/atm/adummy.c
22043 --- linux-3.0.4/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
22044 +++ linux-3.0.4/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
22045 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22046 vcc->pop(vcc, skb);
22047 else
22048 dev_kfree_skb_any(skb);
22049 - atomic_inc(&vcc->stats->tx);
22050 + atomic_inc_unchecked(&vcc->stats->tx);
22051
22052 return 0;
22053 }
22054 diff -urNp linux-3.0.4/drivers/atm/ambassador.c linux-3.0.4/drivers/atm/ambassador.c
22055 --- linux-3.0.4/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
22056 +++ linux-3.0.4/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
22057 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22058 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22059
22060 // VC layer stats
22061 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22062 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22063
22064 // free the descriptor
22065 kfree (tx_descr);
22066 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22067 dump_skb ("<<<", vc, skb);
22068
22069 // VC layer stats
22070 - atomic_inc(&atm_vcc->stats->rx);
22071 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22072 __net_timestamp(skb);
22073 // end of our responsibility
22074 atm_vcc->push (atm_vcc, skb);
22075 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22076 } else {
22077 PRINTK (KERN_INFO, "dropped over-size frame");
22078 // should we count this?
22079 - atomic_inc(&atm_vcc->stats->rx_drop);
22080 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22081 }
22082
22083 } else {
22084 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22085 }
22086
22087 if (check_area (skb->data, skb->len)) {
22088 - atomic_inc(&atm_vcc->stats->tx_err);
22089 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22090 return -ENOMEM; // ?
22091 }
22092
22093 diff -urNp linux-3.0.4/drivers/atm/atmtcp.c linux-3.0.4/drivers/atm/atmtcp.c
22094 --- linux-3.0.4/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
22095 +++ linux-3.0.4/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
22096 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22097 if (vcc->pop) vcc->pop(vcc,skb);
22098 else dev_kfree_skb(skb);
22099 if (dev_data) return 0;
22100 - atomic_inc(&vcc->stats->tx_err);
22101 + atomic_inc_unchecked(&vcc->stats->tx_err);
22102 return -ENOLINK;
22103 }
22104 size = skb->len+sizeof(struct atmtcp_hdr);
22105 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22106 if (!new_skb) {
22107 if (vcc->pop) vcc->pop(vcc,skb);
22108 else dev_kfree_skb(skb);
22109 - atomic_inc(&vcc->stats->tx_err);
22110 + atomic_inc_unchecked(&vcc->stats->tx_err);
22111 return -ENOBUFS;
22112 }
22113 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22114 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22115 if (vcc->pop) vcc->pop(vcc,skb);
22116 else dev_kfree_skb(skb);
22117 out_vcc->push(out_vcc,new_skb);
22118 - atomic_inc(&vcc->stats->tx);
22119 - atomic_inc(&out_vcc->stats->rx);
22120 + atomic_inc_unchecked(&vcc->stats->tx);
22121 + atomic_inc_unchecked(&out_vcc->stats->rx);
22122 return 0;
22123 }
22124
22125 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22126 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22127 read_unlock(&vcc_sklist_lock);
22128 if (!out_vcc) {
22129 - atomic_inc(&vcc->stats->tx_err);
22130 + atomic_inc_unchecked(&vcc->stats->tx_err);
22131 goto done;
22132 }
22133 skb_pull(skb,sizeof(struct atmtcp_hdr));
22134 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22135 __net_timestamp(new_skb);
22136 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22137 out_vcc->push(out_vcc,new_skb);
22138 - atomic_inc(&vcc->stats->tx);
22139 - atomic_inc(&out_vcc->stats->rx);
22140 + atomic_inc_unchecked(&vcc->stats->tx);
22141 + atomic_inc_unchecked(&out_vcc->stats->rx);
22142 done:
22143 if (vcc->pop) vcc->pop(vcc,skb);
22144 else dev_kfree_skb(skb);
22145 diff -urNp linux-3.0.4/drivers/atm/eni.c linux-3.0.4/drivers/atm/eni.c
22146 --- linux-3.0.4/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
22147 +++ linux-3.0.4/drivers/atm/eni.c 2011-08-23 21:47:55.000000000 -0400
22148 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22149 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22150 vcc->dev->number);
22151 length = 0;
22152 - atomic_inc(&vcc->stats->rx_err);
22153 + atomic_inc_unchecked(&vcc->stats->rx_err);
22154 }
22155 else {
22156 length = ATM_CELL_SIZE-1; /* no HEC */
22157 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22158 size);
22159 }
22160 eff = length = 0;
22161 - atomic_inc(&vcc->stats->rx_err);
22162 + atomic_inc_unchecked(&vcc->stats->rx_err);
22163 }
22164 else {
22165 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22166 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22167 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22168 vcc->dev->number,vcc->vci,length,size << 2,descr);
22169 length = eff = 0;
22170 - atomic_inc(&vcc->stats->rx_err);
22171 + atomic_inc_unchecked(&vcc->stats->rx_err);
22172 }
22173 }
22174 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22175 @@ -771,7 +771,7 @@ rx_dequeued++;
22176 vcc->push(vcc,skb);
22177 pushed++;
22178 }
22179 - atomic_inc(&vcc->stats->rx);
22180 + atomic_inc_unchecked(&vcc->stats->rx);
22181 }
22182 wake_up(&eni_dev->rx_wait);
22183 }
22184 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22185 PCI_DMA_TODEVICE);
22186 if (vcc->pop) vcc->pop(vcc,skb);
22187 else dev_kfree_skb_irq(skb);
22188 - atomic_inc(&vcc->stats->tx);
22189 + atomic_inc_unchecked(&vcc->stats->tx);
22190 wake_up(&eni_dev->tx_wait);
22191 dma_complete++;
22192 }
22193 diff -urNp linux-3.0.4/drivers/atm/firestream.c linux-3.0.4/drivers/atm/firestream.c
22194 --- linux-3.0.4/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
22195 +++ linux-3.0.4/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
22196 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22197 }
22198 }
22199
22200 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22201 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22202
22203 fs_dprintk (FS_DEBUG_TXMEM, "i");
22204 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22205 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22206 #endif
22207 skb_put (skb, qe->p1 & 0xffff);
22208 ATM_SKB(skb)->vcc = atm_vcc;
22209 - atomic_inc(&atm_vcc->stats->rx);
22210 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22211 __net_timestamp(skb);
22212 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22213 atm_vcc->push (atm_vcc, skb);
22214 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22215 kfree (pe);
22216 }
22217 if (atm_vcc)
22218 - atomic_inc(&atm_vcc->stats->rx_drop);
22219 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22220 break;
22221 case 0x1f: /* Reassembly abort: no buffers. */
22222 /* Silently increment error counter. */
22223 if (atm_vcc)
22224 - atomic_inc(&atm_vcc->stats->rx_drop);
22225 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22226 break;
22227 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22228 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22229 diff -urNp linux-3.0.4/drivers/atm/fore200e.c linux-3.0.4/drivers/atm/fore200e.c
22230 --- linux-3.0.4/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
22231 +++ linux-3.0.4/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
22232 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22233 #endif
22234 /* check error condition */
22235 if (*entry->status & STATUS_ERROR)
22236 - atomic_inc(&vcc->stats->tx_err);
22237 + atomic_inc_unchecked(&vcc->stats->tx_err);
22238 else
22239 - atomic_inc(&vcc->stats->tx);
22240 + atomic_inc_unchecked(&vcc->stats->tx);
22241 }
22242 }
22243
22244 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22245 if (skb == NULL) {
22246 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22247
22248 - atomic_inc(&vcc->stats->rx_drop);
22249 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22250 return -ENOMEM;
22251 }
22252
22253 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22254
22255 dev_kfree_skb_any(skb);
22256
22257 - atomic_inc(&vcc->stats->rx_drop);
22258 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22259 return -ENOMEM;
22260 }
22261
22262 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22263
22264 vcc->push(vcc, skb);
22265 - atomic_inc(&vcc->stats->rx);
22266 + atomic_inc_unchecked(&vcc->stats->rx);
22267
22268 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22269
22270 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22271 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22272 fore200e->atm_dev->number,
22273 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22274 - atomic_inc(&vcc->stats->rx_err);
22275 + atomic_inc_unchecked(&vcc->stats->rx_err);
22276 }
22277 }
22278
22279 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22280 goto retry_here;
22281 }
22282
22283 - atomic_inc(&vcc->stats->tx_err);
22284 + atomic_inc_unchecked(&vcc->stats->tx_err);
22285
22286 fore200e->tx_sat++;
22287 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22288 diff -urNp linux-3.0.4/drivers/atm/he.c linux-3.0.4/drivers/atm/he.c
22289 --- linux-3.0.4/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
22290 +++ linux-3.0.4/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
22291 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22292
22293 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22294 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22295 - atomic_inc(&vcc->stats->rx_drop);
22296 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22297 goto return_host_buffers;
22298 }
22299
22300 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22301 RBRQ_LEN_ERR(he_dev->rbrq_head)
22302 ? "LEN_ERR" : "",
22303 vcc->vpi, vcc->vci);
22304 - atomic_inc(&vcc->stats->rx_err);
22305 + atomic_inc_unchecked(&vcc->stats->rx_err);
22306 goto return_host_buffers;
22307 }
22308
22309 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22310 vcc->push(vcc, skb);
22311 spin_lock(&he_dev->global_lock);
22312
22313 - atomic_inc(&vcc->stats->rx);
22314 + atomic_inc_unchecked(&vcc->stats->rx);
22315
22316 return_host_buffers:
22317 ++pdus_assembled;
22318 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22319 tpd->vcc->pop(tpd->vcc, tpd->skb);
22320 else
22321 dev_kfree_skb_any(tpd->skb);
22322 - atomic_inc(&tpd->vcc->stats->tx_err);
22323 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22324 }
22325 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22326 return;
22327 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22328 vcc->pop(vcc, skb);
22329 else
22330 dev_kfree_skb_any(skb);
22331 - atomic_inc(&vcc->stats->tx_err);
22332 + atomic_inc_unchecked(&vcc->stats->tx_err);
22333 return -EINVAL;
22334 }
22335
22336 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22337 vcc->pop(vcc, skb);
22338 else
22339 dev_kfree_skb_any(skb);
22340 - atomic_inc(&vcc->stats->tx_err);
22341 + atomic_inc_unchecked(&vcc->stats->tx_err);
22342 return -EINVAL;
22343 }
22344 #endif
22345 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22346 vcc->pop(vcc, skb);
22347 else
22348 dev_kfree_skb_any(skb);
22349 - atomic_inc(&vcc->stats->tx_err);
22350 + atomic_inc_unchecked(&vcc->stats->tx_err);
22351 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22352 return -ENOMEM;
22353 }
22354 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22355 vcc->pop(vcc, skb);
22356 else
22357 dev_kfree_skb_any(skb);
22358 - atomic_inc(&vcc->stats->tx_err);
22359 + atomic_inc_unchecked(&vcc->stats->tx_err);
22360 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22361 return -ENOMEM;
22362 }
22363 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22364 __enqueue_tpd(he_dev, tpd, cid);
22365 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22366
22367 - atomic_inc(&vcc->stats->tx);
22368 + atomic_inc_unchecked(&vcc->stats->tx);
22369
22370 return 0;
22371 }
22372 diff -urNp linux-3.0.4/drivers/atm/horizon.c linux-3.0.4/drivers/atm/horizon.c
22373 --- linux-3.0.4/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
22374 +++ linux-3.0.4/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
22375 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22376 {
22377 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22378 // VC layer stats
22379 - atomic_inc(&vcc->stats->rx);
22380 + atomic_inc_unchecked(&vcc->stats->rx);
22381 __net_timestamp(skb);
22382 // end of our responsibility
22383 vcc->push (vcc, skb);
22384 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22385 dev->tx_iovec = NULL;
22386
22387 // VC layer stats
22388 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22389 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22390
22391 // free the skb
22392 hrz_kfree_skb (skb);
22393 diff -urNp linux-3.0.4/drivers/atm/idt77252.c linux-3.0.4/drivers/atm/idt77252.c
22394 --- linux-3.0.4/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
22395 +++ linux-3.0.4/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
22396 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22397 else
22398 dev_kfree_skb(skb);
22399
22400 - atomic_inc(&vcc->stats->tx);
22401 + atomic_inc_unchecked(&vcc->stats->tx);
22402 }
22403
22404 atomic_dec(&scq->used);
22405 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22406 if ((sb = dev_alloc_skb(64)) == NULL) {
22407 printk("%s: Can't allocate buffers for aal0.\n",
22408 card->name);
22409 - atomic_add(i, &vcc->stats->rx_drop);
22410 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22411 break;
22412 }
22413 if (!atm_charge(vcc, sb->truesize)) {
22414 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22415 card->name);
22416 - atomic_add(i - 1, &vcc->stats->rx_drop);
22417 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22418 dev_kfree_skb(sb);
22419 break;
22420 }
22421 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22422 ATM_SKB(sb)->vcc = vcc;
22423 __net_timestamp(sb);
22424 vcc->push(vcc, sb);
22425 - atomic_inc(&vcc->stats->rx);
22426 + atomic_inc_unchecked(&vcc->stats->rx);
22427
22428 cell += ATM_CELL_PAYLOAD;
22429 }
22430 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22431 "(CDC: %08x)\n",
22432 card->name, len, rpp->len, readl(SAR_REG_CDC));
22433 recycle_rx_pool_skb(card, rpp);
22434 - atomic_inc(&vcc->stats->rx_err);
22435 + atomic_inc_unchecked(&vcc->stats->rx_err);
22436 return;
22437 }
22438 if (stat & SAR_RSQE_CRC) {
22439 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22440 recycle_rx_pool_skb(card, rpp);
22441 - atomic_inc(&vcc->stats->rx_err);
22442 + atomic_inc_unchecked(&vcc->stats->rx_err);
22443 return;
22444 }
22445 if (skb_queue_len(&rpp->queue) > 1) {
22446 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22447 RXPRINTK("%s: Can't alloc RX skb.\n",
22448 card->name);
22449 recycle_rx_pool_skb(card, rpp);
22450 - atomic_inc(&vcc->stats->rx_err);
22451 + atomic_inc_unchecked(&vcc->stats->rx_err);
22452 return;
22453 }
22454 if (!atm_charge(vcc, skb->truesize)) {
22455 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22456 __net_timestamp(skb);
22457
22458 vcc->push(vcc, skb);
22459 - atomic_inc(&vcc->stats->rx);
22460 + atomic_inc_unchecked(&vcc->stats->rx);
22461
22462 return;
22463 }
22464 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22465 __net_timestamp(skb);
22466
22467 vcc->push(vcc, skb);
22468 - atomic_inc(&vcc->stats->rx);
22469 + atomic_inc_unchecked(&vcc->stats->rx);
22470
22471 if (skb->truesize > SAR_FB_SIZE_3)
22472 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22473 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22474 if (vcc->qos.aal != ATM_AAL0) {
22475 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22476 card->name, vpi, vci);
22477 - atomic_inc(&vcc->stats->rx_drop);
22478 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22479 goto drop;
22480 }
22481
22482 if ((sb = dev_alloc_skb(64)) == NULL) {
22483 printk("%s: Can't allocate buffers for AAL0.\n",
22484 card->name);
22485 - atomic_inc(&vcc->stats->rx_err);
22486 + atomic_inc_unchecked(&vcc->stats->rx_err);
22487 goto drop;
22488 }
22489
22490 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22491 ATM_SKB(sb)->vcc = vcc;
22492 __net_timestamp(sb);
22493 vcc->push(vcc, sb);
22494 - atomic_inc(&vcc->stats->rx);
22495 + atomic_inc_unchecked(&vcc->stats->rx);
22496
22497 drop:
22498 skb_pull(queue, 64);
22499 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22500
22501 if (vc == NULL) {
22502 printk("%s: NULL connection in send().\n", card->name);
22503 - atomic_inc(&vcc->stats->tx_err);
22504 + atomic_inc_unchecked(&vcc->stats->tx_err);
22505 dev_kfree_skb(skb);
22506 return -EINVAL;
22507 }
22508 if (!test_bit(VCF_TX, &vc->flags)) {
22509 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22510 - atomic_inc(&vcc->stats->tx_err);
22511 + atomic_inc_unchecked(&vcc->stats->tx_err);
22512 dev_kfree_skb(skb);
22513 return -EINVAL;
22514 }
22515 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22516 break;
22517 default:
22518 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22519 - atomic_inc(&vcc->stats->tx_err);
22520 + atomic_inc_unchecked(&vcc->stats->tx_err);
22521 dev_kfree_skb(skb);
22522 return -EINVAL;
22523 }
22524
22525 if (skb_shinfo(skb)->nr_frags != 0) {
22526 printk("%s: No scatter-gather yet.\n", card->name);
22527 - atomic_inc(&vcc->stats->tx_err);
22528 + atomic_inc_unchecked(&vcc->stats->tx_err);
22529 dev_kfree_skb(skb);
22530 return -EINVAL;
22531 }
22532 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22533
22534 err = queue_skb(card, vc, skb, oam);
22535 if (err) {
22536 - atomic_inc(&vcc->stats->tx_err);
22537 + atomic_inc_unchecked(&vcc->stats->tx_err);
22538 dev_kfree_skb(skb);
22539 return err;
22540 }
22541 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22542 skb = dev_alloc_skb(64);
22543 if (!skb) {
22544 printk("%s: Out of memory in send_oam().\n", card->name);
22545 - atomic_inc(&vcc->stats->tx_err);
22546 + atomic_inc_unchecked(&vcc->stats->tx_err);
22547 return -ENOMEM;
22548 }
22549 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22550 diff -urNp linux-3.0.4/drivers/atm/iphase.c linux-3.0.4/drivers/atm/iphase.c
22551 --- linux-3.0.4/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
22552 +++ linux-3.0.4/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
22553 @@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
22554 status = (u_short) (buf_desc_ptr->desc_mode);
22555 if (status & (RX_CER | RX_PTE | RX_OFL))
22556 {
22557 - atomic_inc(&vcc->stats->rx_err);
22558 + atomic_inc_unchecked(&vcc->stats->rx_err);
22559 IF_ERR(printk("IA: bad packet, dropping it");)
22560 if (status & RX_CER) {
22561 IF_ERR(printk(" cause: packet CRC error\n");)
22562 @@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
22563 len = dma_addr - buf_addr;
22564 if (len > iadev->rx_buf_sz) {
22565 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22566 - atomic_inc(&vcc->stats->rx_err);
22567 + atomic_inc_unchecked(&vcc->stats->rx_err);
22568 goto out_free_desc;
22569 }
22570
22571 @@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
22572 ia_vcc = INPH_IA_VCC(vcc);
22573 if (ia_vcc == NULL)
22574 {
22575 - atomic_inc(&vcc->stats->rx_err);
22576 + atomic_inc_unchecked(&vcc->stats->rx_err);
22577 dev_kfree_skb_any(skb);
22578 atm_return(vcc, atm_guess_pdu2truesize(len));
22579 goto INCR_DLE;
22580 @@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
22581 if ((length > iadev->rx_buf_sz) || (length >
22582 (skb->len - sizeof(struct cpcs_trailer))))
22583 {
22584 - atomic_inc(&vcc->stats->rx_err);
22585 + atomic_inc_unchecked(&vcc->stats->rx_err);
22586 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22587 length, skb->len);)
22588 dev_kfree_skb_any(skb);
22589 @@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
22590
22591 IF_RX(printk("rx_dle_intr: skb push");)
22592 vcc->push(vcc,skb);
22593 - atomic_inc(&vcc->stats->rx);
22594 + atomic_inc_unchecked(&vcc->stats->rx);
22595 iadev->rx_pkt_cnt++;
22596 }
22597 INCR_DLE:
22598 @@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
22599 {
22600 struct k_sonet_stats *stats;
22601 stats = &PRIV(_ia_dev[board])->sonet_stats;
22602 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22603 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22604 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22605 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22606 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22607 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22608 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22609 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22610 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22611 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22612 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22613 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22614 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22615 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22616 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22617 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22618 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22619 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22620 }
22621 ia_cmds.status = 0;
22622 break;
22623 @@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22624 if ((desc == 0) || (desc > iadev->num_tx_desc))
22625 {
22626 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22627 - atomic_inc(&vcc->stats->tx);
22628 + atomic_inc_unchecked(&vcc->stats->tx);
22629 if (vcc->pop)
22630 vcc->pop(vcc, skb);
22631 else
22632 @@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22633 ATM_DESC(skb) = vcc->vci;
22634 skb_queue_tail(&iadev->tx_dma_q, skb);
22635
22636 - atomic_inc(&vcc->stats->tx);
22637 + atomic_inc_unchecked(&vcc->stats->tx);
22638 iadev->tx_pkt_cnt++;
22639 /* Increment transaction counter */
22640 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22641
22642 #if 0
22643 /* add flow control logic */
22644 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22645 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22646 if (iavcc->vc_desc_cnt > 10) {
22647 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22648 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22649 diff -urNp linux-3.0.4/drivers/atm/lanai.c linux-3.0.4/drivers/atm/lanai.c
22650 --- linux-3.0.4/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
22651 +++ linux-3.0.4/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
22652 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22653 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22654 lanai_endtx(lanai, lvcc);
22655 lanai_free_skb(lvcc->tx.atmvcc, skb);
22656 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22657 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22658 }
22659
22660 /* Try to fill the buffer - don't call unless there is backlog */
22661 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22662 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22663 __net_timestamp(skb);
22664 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22665 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22666 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22667 out:
22668 lvcc->rx.buf.ptr = end;
22669 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22670 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22671 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22672 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22673 lanai->stats.service_rxnotaal5++;
22674 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22675 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22676 return 0;
22677 }
22678 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22679 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22680 int bytes;
22681 read_unlock(&vcc_sklist_lock);
22682 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22683 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22684 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22685 lvcc->stats.x.aal5.service_trash++;
22686 bytes = (SERVICE_GET_END(s) * 16) -
22687 (((unsigned long) lvcc->rx.buf.ptr) -
22688 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22689 }
22690 if (s & SERVICE_STREAM) {
22691 read_unlock(&vcc_sklist_lock);
22692 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22693 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22694 lvcc->stats.x.aal5.service_stream++;
22695 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22696 "PDU on VCI %d!\n", lanai->number, vci);
22697 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22698 return 0;
22699 }
22700 DPRINTK("got rx crc error on vci %d\n", vci);
22701 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22702 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22703 lvcc->stats.x.aal5.service_rxcrc++;
22704 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22705 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22706 diff -urNp linux-3.0.4/drivers/atm/nicstar.c linux-3.0.4/drivers/atm/nicstar.c
22707 --- linux-3.0.4/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
22708 +++ linux-3.0.4/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
22709 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22710 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22711 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22712 card->index);
22713 - atomic_inc(&vcc->stats->tx_err);
22714 + atomic_inc_unchecked(&vcc->stats->tx_err);
22715 dev_kfree_skb_any(skb);
22716 return -EINVAL;
22717 }
22718 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22719 if (!vc->tx) {
22720 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22721 card->index);
22722 - atomic_inc(&vcc->stats->tx_err);
22723 + atomic_inc_unchecked(&vcc->stats->tx_err);
22724 dev_kfree_skb_any(skb);
22725 return -EINVAL;
22726 }
22727 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22728 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22729 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22730 card->index);
22731 - atomic_inc(&vcc->stats->tx_err);
22732 + atomic_inc_unchecked(&vcc->stats->tx_err);
22733 dev_kfree_skb_any(skb);
22734 return -EINVAL;
22735 }
22736
22737 if (skb_shinfo(skb)->nr_frags != 0) {
22738 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22739 - atomic_inc(&vcc->stats->tx_err);
22740 + atomic_inc_unchecked(&vcc->stats->tx_err);
22741 dev_kfree_skb_any(skb);
22742 return -EINVAL;
22743 }
22744 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22745 }
22746
22747 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22748 - atomic_inc(&vcc->stats->tx_err);
22749 + atomic_inc_unchecked(&vcc->stats->tx_err);
22750 dev_kfree_skb_any(skb);
22751 return -EIO;
22752 }
22753 - atomic_inc(&vcc->stats->tx);
22754 + atomic_inc_unchecked(&vcc->stats->tx);
22755
22756 return 0;
22757 }
22758 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22759 printk
22760 ("nicstar%d: Can't allocate buffers for aal0.\n",
22761 card->index);
22762 - atomic_add(i, &vcc->stats->rx_drop);
22763 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22764 break;
22765 }
22766 if (!atm_charge(vcc, sb->truesize)) {
22767 RXPRINTK
22768 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22769 card->index);
22770 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22771 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22772 dev_kfree_skb_any(sb);
22773 break;
22774 }
22775 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22776 ATM_SKB(sb)->vcc = vcc;
22777 __net_timestamp(sb);
22778 vcc->push(vcc, sb);
22779 - atomic_inc(&vcc->stats->rx);
22780 + atomic_inc_unchecked(&vcc->stats->rx);
22781 cell += ATM_CELL_PAYLOAD;
22782 }
22783
22784 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22785 if (iovb == NULL) {
22786 printk("nicstar%d: Out of iovec buffers.\n",
22787 card->index);
22788 - atomic_inc(&vcc->stats->rx_drop);
22789 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22790 recycle_rx_buf(card, skb);
22791 return;
22792 }
22793 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22794 small or large buffer itself. */
22795 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22796 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22797 - atomic_inc(&vcc->stats->rx_err);
22798 + atomic_inc_unchecked(&vcc->stats->rx_err);
22799 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22800 NS_MAX_IOVECS);
22801 NS_PRV_IOVCNT(iovb) = 0;
22802 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22803 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22804 card->index);
22805 which_list(card, skb);
22806 - atomic_inc(&vcc->stats->rx_err);
22807 + atomic_inc_unchecked(&vcc->stats->rx_err);
22808 recycle_rx_buf(card, skb);
22809 vc->rx_iov = NULL;
22810 recycle_iov_buf(card, iovb);
22811 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22812 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22813 card->index);
22814 which_list(card, skb);
22815 - atomic_inc(&vcc->stats->rx_err);
22816 + atomic_inc_unchecked(&vcc->stats->rx_err);
22817 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22818 NS_PRV_IOVCNT(iovb));
22819 vc->rx_iov = NULL;
22820 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22821 printk(" - PDU size mismatch.\n");
22822 else
22823 printk(".\n");
22824 - atomic_inc(&vcc->stats->rx_err);
22825 + atomic_inc_unchecked(&vcc->stats->rx_err);
22826 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22827 NS_PRV_IOVCNT(iovb));
22828 vc->rx_iov = NULL;
22829 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22830 /* skb points to a small buffer */
22831 if (!atm_charge(vcc, skb->truesize)) {
22832 push_rxbufs(card, skb);
22833 - atomic_inc(&vcc->stats->rx_drop);
22834 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22835 } else {
22836 skb_put(skb, len);
22837 dequeue_sm_buf(card, skb);
22838 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22839 ATM_SKB(skb)->vcc = vcc;
22840 __net_timestamp(skb);
22841 vcc->push(vcc, skb);
22842 - atomic_inc(&vcc->stats->rx);
22843 + atomic_inc_unchecked(&vcc->stats->rx);
22844 }
22845 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
22846 struct sk_buff *sb;
22847 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22848 if (len <= NS_SMBUFSIZE) {
22849 if (!atm_charge(vcc, sb->truesize)) {
22850 push_rxbufs(card, sb);
22851 - atomic_inc(&vcc->stats->rx_drop);
22852 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22853 } else {
22854 skb_put(sb, len);
22855 dequeue_sm_buf(card, sb);
22856 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
22857 ATM_SKB(sb)->vcc = vcc;
22858 __net_timestamp(sb);
22859 vcc->push(vcc, sb);
22860 - atomic_inc(&vcc->stats->rx);
22861 + atomic_inc_unchecked(&vcc->stats->rx);
22862 }
22863
22864 push_rxbufs(card, skb);
22865 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
22866
22867 if (!atm_charge(vcc, skb->truesize)) {
22868 push_rxbufs(card, skb);
22869 - atomic_inc(&vcc->stats->rx_drop);
22870 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22871 } else {
22872 dequeue_lg_buf(card, skb);
22873 #ifdef NS_USE_DESTRUCTORS
22874 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
22875 ATM_SKB(skb)->vcc = vcc;
22876 __net_timestamp(skb);
22877 vcc->push(vcc, skb);
22878 - atomic_inc(&vcc->stats->rx);
22879 + atomic_inc_unchecked(&vcc->stats->rx);
22880 }
22881
22882 push_rxbufs(card, sb);
22883 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
22884 printk
22885 ("nicstar%d: Out of huge buffers.\n",
22886 card->index);
22887 - atomic_inc(&vcc->stats->rx_drop);
22888 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22889 recycle_iovec_rx_bufs(card,
22890 (struct iovec *)
22891 iovb->data,
22892 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
22893 card->hbpool.count++;
22894 } else
22895 dev_kfree_skb_any(hb);
22896 - atomic_inc(&vcc->stats->rx_drop);
22897 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22898 } else {
22899 /* Copy the small buffer to the huge buffer */
22900 sb = (struct sk_buff *)iov->iov_base;
22901 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
22902 #endif /* NS_USE_DESTRUCTORS */
22903 __net_timestamp(hb);
22904 vcc->push(vcc, hb);
22905 - atomic_inc(&vcc->stats->rx);
22906 + atomic_inc_unchecked(&vcc->stats->rx);
22907 }
22908 }
22909
22910 diff -urNp linux-3.0.4/drivers/atm/solos-pci.c linux-3.0.4/drivers/atm/solos-pci.c
22911 --- linux-3.0.4/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
22912 +++ linux-3.0.4/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
22913 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
22914 }
22915 atm_charge(vcc, skb->truesize);
22916 vcc->push(vcc, skb);
22917 - atomic_inc(&vcc->stats->rx);
22918 + atomic_inc_unchecked(&vcc->stats->rx);
22919 break;
22920
22921 case PKT_STATUS:
22922 @@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
22923 char msg[500];
22924 char item[10];
22925
22926 + pax_track_stack();
22927 +
22928 len = buf->len;
22929 for (i = 0; i < len; i++){
22930 if(i % 8 == 0)
22931 @@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
22932 vcc = SKB_CB(oldskb)->vcc;
22933
22934 if (vcc) {
22935 - atomic_inc(&vcc->stats->tx);
22936 + atomic_inc_unchecked(&vcc->stats->tx);
22937 solos_pop(vcc, oldskb);
22938 } else
22939 dev_kfree_skb_irq(oldskb);
22940 diff -urNp linux-3.0.4/drivers/atm/suni.c linux-3.0.4/drivers/atm/suni.c
22941 --- linux-3.0.4/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
22942 +++ linux-3.0.4/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
22943 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
22944
22945
22946 #define ADD_LIMITED(s,v) \
22947 - atomic_add((v),&stats->s); \
22948 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
22949 + atomic_add_unchecked((v),&stats->s); \
22950 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
22951
22952
22953 static void suni_hz(unsigned long from_timer)
22954 diff -urNp linux-3.0.4/drivers/atm/uPD98402.c linux-3.0.4/drivers/atm/uPD98402.c
22955 --- linux-3.0.4/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
22956 +++ linux-3.0.4/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
22957 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
22958 struct sonet_stats tmp;
22959 int error = 0;
22960
22961 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22962 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22963 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
22964 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
22965 if (zero && !error) {
22966 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
22967
22968
22969 #define ADD_LIMITED(s,v) \
22970 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
22971 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
22972 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22973 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
22974 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
22975 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22976
22977
22978 static void stat_event(struct atm_dev *dev)
22979 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
22980 if (reason & uPD98402_INT_PFM) stat_event(dev);
22981 if (reason & uPD98402_INT_PCO) {
22982 (void) GET(PCOCR); /* clear interrupt cause */
22983 - atomic_add(GET(HECCT),
22984 + atomic_add_unchecked(GET(HECCT),
22985 &PRIV(dev)->sonet_stats.uncorr_hcs);
22986 }
22987 if ((reason & uPD98402_INT_RFO) &&
22988 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
22989 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
22990 uPD98402_INT_LOS),PIMR); /* enable them */
22991 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
22992 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22993 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
22994 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
22995 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22996 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
22997 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
22998 return 0;
22999 }
23000
23001 diff -urNp linux-3.0.4/drivers/atm/zatm.c linux-3.0.4/drivers/atm/zatm.c
23002 --- linux-3.0.4/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
23003 +++ linux-3.0.4/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
23004 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23005 }
23006 if (!size) {
23007 dev_kfree_skb_irq(skb);
23008 - if (vcc) atomic_inc(&vcc->stats->rx_err);
23009 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23010 continue;
23011 }
23012 if (!atm_charge(vcc,skb->truesize)) {
23013 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23014 skb->len = size;
23015 ATM_SKB(skb)->vcc = vcc;
23016 vcc->push(vcc,skb);
23017 - atomic_inc(&vcc->stats->rx);
23018 + atomic_inc_unchecked(&vcc->stats->rx);
23019 }
23020 zout(pos & 0xffff,MTA(mbx));
23021 #if 0 /* probably a stupid idea */
23022 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23023 skb_queue_head(&zatm_vcc->backlog,skb);
23024 break;
23025 }
23026 - atomic_inc(&vcc->stats->tx);
23027 + atomic_inc_unchecked(&vcc->stats->tx);
23028 wake_up(&zatm_vcc->tx_wait);
23029 }
23030
23031 diff -urNp linux-3.0.4/drivers/base/power/wakeup.c linux-3.0.4/drivers/base/power/wakeup.c
23032 --- linux-3.0.4/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
23033 +++ linux-3.0.4/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
23034 @@ -29,14 +29,14 @@ bool events_check_enabled;
23035 * They need to be modified together atomically, so it's better to use one
23036 * atomic variable to hold them both.
23037 */
23038 -static atomic_t combined_event_count = ATOMIC_INIT(0);
23039 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23040
23041 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23042 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23043
23044 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23045 {
23046 - unsigned int comb = atomic_read(&combined_event_count);
23047 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
23048
23049 *cnt = (comb >> IN_PROGRESS_BITS);
23050 *inpr = comb & MAX_IN_PROGRESS;
23051 @@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
23052 ws->last_time = ktime_get();
23053
23054 /* Increment the counter of events in progress. */
23055 - atomic_inc(&combined_event_count);
23056 + atomic_inc_unchecked(&combined_event_count);
23057 }
23058
23059 /**
23060 @@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
23061 * Increment the counter of registered wakeup events and decrement the
23062 * couter of wakeup events in progress simultaneously.
23063 */
23064 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23065 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23066 }
23067
23068 /**
23069 diff -urNp linux-3.0.4/drivers/block/cciss.c linux-3.0.4/drivers/block/cciss.c
23070 --- linux-3.0.4/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
23071 +++ linux-3.0.4/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
23072 @@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
23073 int err;
23074 u32 cp;
23075
23076 + memset(&arg64, 0, sizeof(arg64));
23077 +
23078 err = 0;
23079 err |=
23080 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23081 @@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
23082 while (!list_empty(&h->reqQ)) {
23083 c = list_entry(h->reqQ.next, CommandList_struct, list);
23084 /* can't do anything if fifo is full */
23085 - if ((h->access.fifo_full(h))) {
23086 + if ((h->access->fifo_full(h))) {
23087 dev_warn(&h->pdev->dev, "fifo full\n");
23088 break;
23089 }
23090 @@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
23091 h->Qdepth--;
23092
23093 /* Tell the controller execute command */
23094 - h->access.submit_command(h, c);
23095 + h->access->submit_command(h, c);
23096
23097 /* Put job onto the completed Q */
23098 addQ(&h->cmpQ, c);
23099 @@ -3422,17 +3424,17 @@ startio:
23100
23101 static inline unsigned long get_next_completion(ctlr_info_t *h)
23102 {
23103 - return h->access.command_completed(h);
23104 + return h->access->command_completed(h);
23105 }
23106
23107 static inline int interrupt_pending(ctlr_info_t *h)
23108 {
23109 - return h->access.intr_pending(h);
23110 + return h->access->intr_pending(h);
23111 }
23112
23113 static inline long interrupt_not_for_us(ctlr_info_t *h)
23114 {
23115 - return ((h->access.intr_pending(h) == 0) ||
23116 + return ((h->access->intr_pending(h) == 0) ||
23117 (h->interrupts_enabled == 0));
23118 }
23119
23120 @@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
23121 u32 a;
23122
23123 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23124 - return h->access.command_completed(h);
23125 + return h->access->command_completed(h);
23126
23127 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23128 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23129 @@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
23130 trans_support & CFGTBL_Trans_use_short_tags);
23131
23132 /* Change the access methods to the performant access methods */
23133 - h->access = SA5_performant_access;
23134 + h->access = &SA5_performant_access;
23135 h->transMethod = CFGTBL_Trans_Performant;
23136
23137 return;
23138 @@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
23139 if (prod_index < 0)
23140 return -ENODEV;
23141 h->product_name = products[prod_index].product_name;
23142 - h->access = *(products[prod_index].access);
23143 + h->access = products[prod_index].access;
23144
23145 if (cciss_board_disabled(h)) {
23146 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23147 @@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
23148 }
23149
23150 /* make sure the board interrupts are off */
23151 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23152 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23153 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
23154 if (rc)
23155 goto clean2;
23156 @@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
23157 * fake ones to scoop up any residual completions.
23158 */
23159 spin_lock_irqsave(&h->lock, flags);
23160 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23161 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23162 spin_unlock_irqrestore(&h->lock, flags);
23163 free_irq(h->intr[PERF_MODE_INT], h);
23164 rc = cciss_request_irq(h, cciss_msix_discard_completions,
23165 @@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
23166 dev_info(&h->pdev->dev, "Board READY.\n");
23167 dev_info(&h->pdev->dev,
23168 "Waiting for stale completions to drain.\n");
23169 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23170 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23171 msleep(10000);
23172 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23173 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23174
23175 rc = controller_reset_failed(h->cfgtable);
23176 if (rc)
23177 @@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
23178 cciss_scsi_setup(h);
23179
23180 /* Turn the interrupts on so we can service requests */
23181 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23182 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23183
23184 /* Get the firmware version */
23185 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23186 @@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
23187 kfree(flush_buf);
23188 if (return_code != IO_OK)
23189 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23190 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23191 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23192 free_irq(h->intr[PERF_MODE_INT], h);
23193 }
23194
23195 diff -urNp linux-3.0.4/drivers/block/cciss.h linux-3.0.4/drivers/block/cciss.h
23196 --- linux-3.0.4/drivers/block/cciss.h 2011-08-23 21:44:40.000000000 -0400
23197 +++ linux-3.0.4/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
23198 @@ -100,7 +100,7 @@ struct ctlr_info
23199 /* information about each logical volume */
23200 drive_info_struct *drv[CISS_MAX_LUN];
23201
23202 - struct access_method access;
23203 + struct access_method *access;
23204
23205 /* queue and queue Info */
23206 struct list_head reqQ;
23207 diff -urNp linux-3.0.4/drivers/block/cpqarray.c linux-3.0.4/drivers/block/cpqarray.c
23208 --- linux-3.0.4/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
23209 +++ linux-3.0.4/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
23210 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23211 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23212 goto Enomem4;
23213 }
23214 - hba[i]->access.set_intr_mask(hba[i], 0);
23215 + hba[i]->access->set_intr_mask(hba[i], 0);
23216 if (request_irq(hba[i]->intr, do_ida_intr,
23217 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23218 {
23219 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23220 add_timer(&hba[i]->timer);
23221
23222 /* Enable IRQ now that spinlock and rate limit timer are set up */
23223 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23224 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23225
23226 for(j=0; j<NWD; j++) {
23227 struct gendisk *disk = ida_gendisk[i][j];
23228 @@ -694,7 +694,7 @@ DBGINFO(
23229 for(i=0; i<NR_PRODUCTS; i++) {
23230 if (board_id == products[i].board_id) {
23231 c->product_name = products[i].product_name;
23232 - c->access = *(products[i].access);
23233 + c->access = products[i].access;
23234 break;
23235 }
23236 }
23237 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23238 hba[ctlr]->intr = intr;
23239 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23240 hba[ctlr]->product_name = products[j].product_name;
23241 - hba[ctlr]->access = *(products[j].access);
23242 + hba[ctlr]->access = products[j].access;
23243 hba[ctlr]->ctlr = ctlr;
23244 hba[ctlr]->board_id = board_id;
23245 hba[ctlr]->pci_dev = NULL; /* not PCI */
23246 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23247 struct scatterlist tmp_sg[SG_MAX];
23248 int i, dir, seg;
23249
23250 + pax_track_stack();
23251 +
23252 queue_next:
23253 creq = blk_peek_request(q);
23254 if (!creq)
23255 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23256
23257 while((c = h->reqQ) != NULL) {
23258 /* Can't do anything if we're busy */
23259 - if (h->access.fifo_full(h) == 0)
23260 + if (h->access->fifo_full(h) == 0)
23261 return;
23262
23263 /* Get the first entry from the request Q */
23264 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23265 h->Qdepth--;
23266
23267 /* Tell the controller to do our bidding */
23268 - h->access.submit_command(h, c);
23269 + h->access->submit_command(h, c);
23270
23271 /* Get onto the completion Q */
23272 addQ(&h->cmpQ, c);
23273 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23274 unsigned long flags;
23275 __u32 a,a1;
23276
23277 - istat = h->access.intr_pending(h);
23278 + istat = h->access->intr_pending(h);
23279 /* Is this interrupt for us? */
23280 if (istat == 0)
23281 return IRQ_NONE;
23282 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23283 */
23284 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23285 if (istat & FIFO_NOT_EMPTY) {
23286 - while((a = h->access.command_completed(h))) {
23287 + while((a = h->access->command_completed(h))) {
23288 a1 = a; a &= ~3;
23289 if ((c = h->cmpQ) == NULL)
23290 {
23291 @@ -1449,11 +1451,11 @@ static int sendcmd(
23292 /*
23293 * Disable interrupt
23294 */
23295 - info_p->access.set_intr_mask(info_p, 0);
23296 + info_p->access->set_intr_mask(info_p, 0);
23297 /* Make sure there is room in the command FIFO */
23298 /* Actually it should be completely empty at this time. */
23299 for (i = 200000; i > 0; i--) {
23300 - temp = info_p->access.fifo_full(info_p);
23301 + temp = info_p->access->fifo_full(info_p);
23302 if (temp != 0) {
23303 break;
23304 }
23305 @@ -1466,7 +1468,7 @@ DBG(
23306 /*
23307 * Send the cmd
23308 */
23309 - info_p->access.submit_command(info_p, c);
23310 + info_p->access->submit_command(info_p, c);
23311 complete = pollcomplete(ctlr);
23312
23313 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23314 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23315 * we check the new geometry. Then turn interrupts back on when
23316 * we're done.
23317 */
23318 - host->access.set_intr_mask(host, 0);
23319 + host->access->set_intr_mask(host, 0);
23320 getgeometry(ctlr);
23321 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23322 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23323
23324 for(i=0; i<NWD; i++) {
23325 struct gendisk *disk = ida_gendisk[ctlr][i];
23326 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23327 /* Wait (up to 2 seconds) for a command to complete */
23328
23329 for (i = 200000; i > 0; i--) {
23330 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
23331 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
23332 if (done == 0) {
23333 udelay(10); /* a short fixed delay */
23334 } else
23335 diff -urNp linux-3.0.4/drivers/block/cpqarray.h linux-3.0.4/drivers/block/cpqarray.h
23336 --- linux-3.0.4/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
23337 +++ linux-3.0.4/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
23338 @@ -99,7 +99,7 @@ struct ctlr_info {
23339 drv_info_t drv[NWD];
23340 struct proc_dir_entry *proc;
23341
23342 - struct access_method access;
23343 + struct access_method *access;
23344
23345 cmdlist_t *reqQ;
23346 cmdlist_t *cmpQ;
23347 diff -urNp linux-3.0.4/drivers/block/DAC960.c linux-3.0.4/drivers/block/DAC960.c
23348 --- linux-3.0.4/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
23349 +++ linux-3.0.4/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
23350 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23351 unsigned long flags;
23352 int Channel, TargetID;
23353
23354 + pax_track_stack();
23355 +
23356 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23357 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23358 sizeof(DAC960_SCSI_Inquiry_T) +
23359 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_int.h linux-3.0.4/drivers/block/drbd/drbd_int.h
23360 --- linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
23361 +++ linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-08-23 21:47:55.000000000 -0400
23362 @@ -737,7 +737,7 @@ struct drbd_request;
23363 struct drbd_epoch {
23364 struct list_head list;
23365 unsigned int barrier_nr;
23366 - atomic_t epoch_size; /* increased on every request added. */
23367 + atomic_unchecked_t epoch_size; /* increased on every request added. */
23368 atomic_t active; /* increased on every req. added, and dec on every finished. */
23369 unsigned long flags;
23370 };
23371 @@ -1109,7 +1109,7 @@ struct drbd_conf {
23372 void *int_dig_in;
23373 void *int_dig_vv;
23374 wait_queue_head_t seq_wait;
23375 - atomic_t packet_seq;
23376 + atomic_unchecked_t packet_seq;
23377 unsigned int peer_seq;
23378 spinlock_t peer_seq_lock;
23379 unsigned int minor;
23380 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_main.c linux-3.0.4/drivers/block/drbd/drbd_main.c
23381 --- linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
23382 +++ linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
23383 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
23384 p.sector = sector;
23385 p.block_id = block_id;
23386 p.blksize = blksize;
23387 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23388 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23389
23390 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23391 return false;
23392 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
23393 p.sector = cpu_to_be64(req->sector);
23394 p.block_id = (unsigned long)req;
23395 p.seq_num = cpu_to_be32(req->seq_num =
23396 - atomic_add_return(1, &mdev->packet_seq));
23397 + atomic_add_return_unchecked(1, &mdev->packet_seq));
23398
23399 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23400
23401 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
23402 atomic_set(&mdev->unacked_cnt, 0);
23403 atomic_set(&mdev->local_cnt, 0);
23404 atomic_set(&mdev->net_cnt, 0);
23405 - atomic_set(&mdev->packet_seq, 0);
23406 + atomic_set_unchecked(&mdev->packet_seq, 0);
23407 atomic_set(&mdev->pp_in_use, 0);
23408 atomic_set(&mdev->pp_in_use_by_net, 0);
23409 atomic_set(&mdev->rs_sect_in, 0);
23410 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23411 mdev->receiver.t_state);
23412
23413 /* no need to lock it, I'm the only thread alive */
23414 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23415 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23416 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23417 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23418 mdev->al_writ_cnt =
23419 mdev->bm_writ_cnt =
23420 mdev->read_cnt =
23421 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_nl.c linux-3.0.4/drivers/block/drbd/drbd_nl.c
23422 --- linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
23423 +++ linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
23424 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
23425 module_put(THIS_MODULE);
23426 }
23427
23428 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23429 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23430
23431 static unsigned short *
23432 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23433 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
23434 cn_reply->id.idx = CN_IDX_DRBD;
23435 cn_reply->id.val = CN_VAL_DRBD;
23436
23437 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23438 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23439 cn_reply->ack = 0; /* not used here. */
23440 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23441 (int)((char *)tl - (char *)reply->tag_list);
23442 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23443 cn_reply->id.idx = CN_IDX_DRBD;
23444 cn_reply->id.val = CN_VAL_DRBD;
23445
23446 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23447 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23448 cn_reply->ack = 0; /* not used here. */
23449 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23450 (int)((char *)tl - (char *)reply->tag_list);
23451 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23452 cn_reply->id.idx = CN_IDX_DRBD;
23453 cn_reply->id.val = CN_VAL_DRBD;
23454
23455 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23456 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23457 cn_reply->ack = 0; // not used here.
23458 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23459 (int)((char*)tl - (char*)reply->tag_list);
23460 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
23461 cn_reply->id.idx = CN_IDX_DRBD;
23462 cn_reply->id.val = CN_VAL_DRBD;
23463
23464 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23465 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23466 cn_reply->ack = 0; /* not used here. */
23467 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23468 (int)((char *)tl - (char *)reply->tag_list);
23469 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_receiver.c linux-3.0.4/drivers/block/drbd/drbd_receiver.c
23470 --- linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
23471 +++ linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
23472 @@ -894,7 +894,7 @@ retry:
23473 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23474 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23475
23476 - atomic_set(&mdev->packet_seq, 0);
23477 + atomic_set_unchecked(&mdev->packet_seq, 0);
23478 mdev->peer_seq = 0;
23479
23480 drbd_thread_start(&mdev->asender);
23481 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
23482 do {
23483 next_epoch = NULL;
23484
23485 - epoch_size = atomic_read(&epoch->epoch_size);
23486 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23487
23488 switch (ev & ~EV_CLEANUP) {
23489 case EV_PUT:
23490 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
23491 rv = FE_DESTROYED;
23492 } else {
23493 epoch->flags = 0;
23494 - atomic_set(&epoch->epoch_size, 0);
23495 + atomic_set_unchecked(&epoch->epoch_size, 0);
23496 /* atomic_set(&epoch->active, 0); is already zero */
23497 if (rv == FE_STILL_LIVE)
23498 rv = FE_RECYCLED;
23499 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
23500 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23501 drbd_flush(mdev);
23502
23503 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23504 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23505 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23506 if (epoch)
23507 break;
23508 }
23509
23510 epoch = mdev->current_epoch;
23511 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23512 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23513
23514 D_ASSERT(atomic_read(&epoch->active) == 0);
23515 D_ASSERT(epoch->flags == 0);
23516 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
23517 }
23518
23519 epoch->flags = 0;
23520 - atomic_set(&epoch->epoch_size, 0);
23521 + atomic_set_unchecked(&epoch->epoch_size, 0);
23522 atomic_set(&epoch->active, 0);
23523
23524 spin_lock(&mdev->epoch_lock);
23525 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23526 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23527 list_add(&epoch->list, &mdev->current_epoch->list);
23528 mdev->current_epoch = epoch;
23529 mdev->epochs++;
23530 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
23531 spin_unlock(&mdev->peer_seq_lock);
23532
23533 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23534 - atomic_inc(&mdev->current_epoch->epoch_size);
23535 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23536 return drbd_drain_block(mdev, data_size);
23537 }
23538
23539 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
23540
23541 spin_lock(&mdev->epoch_lock);
23542 e->epoch = mdev->current_epoch;
23543 - atomic_inc(&e->epoch->epoch_size);
23544 + atomic_inc_unchecked(&e->epoch->epoch_size);
23545 atomic_inc(&e->epoch->active);
23546 spin_unlock(&mdev->epoch_lock);
23547
23548 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
23549 D_ASSERT(list_empty(&mdev->done_ee));
23550
23551 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23552 - atomic_set(&mdev->current_epoch->epoch_size, 0);
23553 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23554 D_ASSERT(list_empty(&mdev->current_epoch->list));
23555 }
23556
23557 diff -urNp linux-3.0.4/drivers/block/nbd.c linux-3.0.4/drivers/block/nbd.c
23558 --- linux-3.0.4/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
23559 +++ linux-3.0.4/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
23560 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23561 struct kvec iov;
23562 sigset_t blocked, oldset;
23563
23564 + pax_track_stack();
23565 +
23566 if (unlikely(!sock)) {
23567 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23568 lo->disk->disk_name, (send ? "send" : "recv"));
23569 @@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
23570 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23571 unsigned int cmd, unsigned long arg)
23572 {
23573 + pax_track_stack();
23574 +
23575 switch (cmd) {
23576 case NBD_DISCONNECT: {
23577 struct request sreq;
23578 diff -urNp linux-3.0.4/drivers/char/agp/frontend.c linux-3.0.4/drivers/char/agp/frontend.c
23579 --- linux-3.0.4/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
23580 +++ linux-3.0.4/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
23581 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23582 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23583 return -EFAULT;
23584
23585 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23586 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23587 return -EFAULT;
23588
23589 client = agp_find_client_by_pid(reserve.pid);
23590 diff -urNp linux-3.0.4/drivers/char/briq_panel.c linux-3.0.4/drivers/char/briq_panel.c
23591 --- linux-3.0.4/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
23592 +++ linux-3.0.4/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
23593 @@ -9,6 +9,7 @@
23594 #include <linux/types.h>
23595 #include <linux/errno.h>
23596 #include <linux/tty.h>
23597 +#include <linux/mutex.h>
23598 #include <linux/timer.h>
23599 #include <linux/kernel.h>
23600 #include <linux/wait.h>
23601 @@ -34,6 +35,7 @@ static int vfd_is_open;
23602 static unsigned char vfd[40];
23603 static int vfd_cursor;
23604 static unsigned char ledpb, led;
23605 +static DEFINE_MUTEX(vfd_mutex);
23606
23607 static void update_vfd(void)
23608 {
23609 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23610 if (!vfd_is_open)
23611 return -EBUSY;
23612
23613 + mutex_lock(&vfd_mutex);
23614 for (;;) {
23615 char c;
23616 if (!indx)
23617 break;
23618 - if (get_user(c, buf))
23619 + if (get_user(c, buf)) {
23620 + mutex_unlock(&vfd_mutex);
23621 return -EFAULT;
23622 + }
23623 if (esc) {
23624 set_led(c);
23625 esc = 0;
23626 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23627 buf++;
23628 }
23629 update_vfd();
23630 + mutex_unlock(&vfd_mutex);
23631
23632 return len;
23633 }
23634 diff -urNp linux-3.0.4/drivers/char/genrtc.c linux-3.0.4/drivers/char/genrtc.c
23635 --- linux-3.0.4/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
23636 +++ linux-3.0.4/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
23637 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23638 switch (cmd) {
23639
23640 case RTC_PLL_GET:
23641 + memset(&pll, 0, sizeof(pll));
23642 if (get_rtc_pll(&pll))
23643 return -EINVAL;
23644 else
23645 diff -urNp linux-3.0.4/drivers/char/hpet.c linux-3.0.4/drivers/char/hpet.c
23646 --- linux-3.0.4/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
23647 +++ linux-3.0.4/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
23648 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
23649 }
23650
23651 static int
23652 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23653 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23654 struct hpet_info *info)
23655 {
23656 struct hpet_timer __iomem *timer;
23657 diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c
23658 --- linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
23659 +++ linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
23660 @@ -415,7 +415,7 @@ struct ipmi_smi {
23661 struct proc_dir_entry *proc_dir;
23662 char proc_dir_name[10];
23663
23664 - atomic_t stats[IPMI_NUM_STATS];
23665 + atomic_unchecked_t stats[IPMI_NUM_STATS];
23666
23667 /*
23668 * run_to_completion duplicate of smb_info, smi_info
23669 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23670
23671
23672 #define ipmi_inc_stat(intf, stat) \
23673 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23674 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23675 #define ipmi_get_stat(intf, stat) \
23676 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23677 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23678
23679 static int is_lan_addr(struct ipmi_addr *addr)
23680 {
23681 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23682 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23683 init_waitqueue_head(&intf->waitq);
23684 for (i = 0; i < IPMI_NUM_STATS; i++)
23685 - atomic_set(&intf->stats[i], 0);
23686 + atomic_set_unchecked(&intf->stats[i], 0);
23687
23688 intf->proc_dir = NULL;
23689
23690 @@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
23691 struct ipmi_smi_msg smi_msg;
23692 struct ipmi_recv_msg recv_msg;
23693
23694 + pax_track_stack();
23695 +
23696 si = (struct ipmi_system_interface_addr *) &addr;
23697 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23698 si->channel = IPMI_BMC_CHANNEL;
23699 diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c
23700 --- linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
23701 +++ linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
23702 @@ -277,7 +277,7 @@ struct smi_info {
23703 unsigned char slave_addr;
23704
23705 /* Counters and things for the proc filesystem. */
23706 - atomic_t stats[SI_NUM_STATS];
23707 + atomic_unchecked_t stats[SI_NUM_STATS];
23708
23709 struct task_struct *thread;
23710
23711 @@ -286,9 +286,9 @@ struct smi_info {
23712 };
23713
23714 #define smi_inc_stat(smi, stat) \
23715 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23716 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23717 #define smi_get_stat(smi, stat) \
23718 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23719 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23720
23721 #define SI_MAX_PARMS 4
23722
23723 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
23724 atomic_set(&new_smi->req_events, 0);
23725 new_smi->run_to_completion = 0;
23726 for (i = 0; i < SI_NUM_STATS; i++)
23727 - atomic_set(&new_smi->stats[i], 0);
23728 + atomic_set_unchecked(&new_smi->stats[i], 0);
23729
23730 new_smi->interrupt_disabled = 1;
23731 atomic_set(&new_smi->stop_operation, 0);
23732 diff -urNp linux-3.0.4/drivers/char/Kconfig linux-3.0.4/drivers/char/Kconfig
23733 --- linux-3.0.4/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
23734 +++ linux-3.0.4/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
23735 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23736
23737 config DEVKMEM
23738 bool "/dev/kmem virtual device support"
23739 - default y
23740 + default n
23741 + depends on !GRKERNSEC_KMEM
23742 help
23743 Say Y here if you want to support the /dev/kmem device. The
23744 /dev/kmem device is rarely used, but can be used for certain
23745 @@ -596,6 +597,7 @@ config DEVPORT
23746 bool
23747 depends on !M68K
23748 depends on ISA || PCI
23749 + depends on !GRKERNSEC_KMEM
23750 default y
23751
23752 source "drivers/s390/char/Kconfig"
23753 diff -urNp linux-3.0.4/drivers/char/mem.c linux-3.0.4/drivers/char/mem.c
23754 --- linux-3.0.4/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
23755 +++ linux-3.0.4/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
23756 @@ -18,6 +18,7 @@
23757 #include <linux/raw.h>
23758 #include <linux/tty.h>
23759 #include <linux/capability.h>
23760 +#include <linux/security.h>
23761 #include <linux/ptrace.h>
23762 #include <linux/device.h>
23763 #include <linux/highmem.h>
23764 @@ -34,6 +35,10 @@
23765 # include <linux/efi.h>
23766 #endif
23767
23768 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23769 +extern struct file_operations grsec_fops;
23770 +#endif
23771 +
23772 static inline unsigned long size_inside_page(unsigned long start,
23773 unsigned long size)
23774 {
23775 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23776
23777 while (cursor < to) {
23778 if (!devmem_is_allowed(pfn)) {
23779 +#ifdef CONFIG_GRKERNSEC_KMEM
23780 + gr_handle_mem_readwrite(from, to);
23781 +#else
23782 printk(KERN_INFO
23783 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23784 current->comm, from, to);
23785 +#endif
23786 return 0;
23787 }
23788 cursor += PAGE_SIZE;
23789 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23790 }
23791 return 1;
23792 }
23793 +#elif defined(CONFIG_GRKERNSEC_KMEM)
23794 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23795 +{
23796 + return 0;
23797 +}
23798 #else
23799 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23800 {
23801 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23802
23803 while (count > 0) {
23804 unsigned long remaining;
23805 + char *temp;
23806
23807 sz = size_inside_page(p, count);
23808
23809 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23810 if (!ptr)
23811 return -EFAULT;
23812
23813 - remaining = copy_to_user(buf, ptr, sz);
23814 +#ifdef CONFIG_PAX_USERCOPY
23815 + temp = kmalloc(sz, GFP_KERNEL);
23816 + if (!temp) {
23817 + unxlate_dev_mem_ptr(p, ptr);
23818 + return -ENOMEM;
23819 + }
23820 + memcpy(temp, ptr, sz);
23821 +#else
23822 + temp = ptr;
23823 +#endif
23824 +
23825 + remaining = copy_to_user(buf, temp, sz);
23826 +
23827 +#ifdef CONFIG_PAX_USERCOPY
23828 + kfree(temp);
23829 +#endif
23830 +
23831 unxlate_dev_mem_ptr(p, ptr);
23832 if (remaining)
23833 return -EFAULT;
23834 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23835 size_t count, loff_t *ppos)
23836 {
23837 unsigned long p = *ppos;
23838 - ssize_t low_count, read, sz;
23839 + ssize_t low_count, read, sz, err = 0;
23840 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23841 - int err = 0;
23842
23843 read = 0;
23844 if (p < (unsigned long) high_memory) {
23845 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23846 }
23847 #endif
23848 while (low_count > 0) {
23849 + char *temp;
23850 +
23851 sz = size_inside_page(p, low_count);
23852
23853 /*
23854 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23855 */
23856 kbuf = xlate_dev_kmem_ptr((char *)p);
23857
23858 - if (copy_to_user(buf, kbuf, sz))
23859 +#ifdef CONFIG_PAX_USERCOPY
23860 + temp = kmalloc(sz, GFP_KERNEL);
23861 + if (!temp)
23862 + return -ENOMEM;
23863 + memcpy(temp, kbuf, sz);
23864 +#else
23865 + temp = kbuf;
23866 +#endif
23867 +
23868 + err = copy_to_user(buf, temp, sz);
23869 +
23870 +#ifdef CONFIG_PAX_USERCOPY
23871 + kfree(temp);
23872 +#endif
23873 +
23874 + if (err)
23875 return -EFAULT;
23876 buf += sz;
23877 p += sz;
23878 @@ -866,6 +913,9 @@ static const struct memdev {
23879 #ifdef CONFIG_CRASH_DUMP
23880 [12] = { "oldmem", 0, &oldmem_fops, NULL },
23881 #endif
23882 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23883 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
23884 +#endif
23885 };
23886
23887 static int memory_open(struct inode *inode, struct file *filp)
23888 diff -urNp linux-3.0.4/drivers/char/nvram.c linux-3.0.4/drivers/char/nvram.c
23889 --- linux-3.0.4/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
23890 +++ linux-3.0.4/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
23891 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
23892
23893 spin_unlock_irq(&rtc_lock);
23894
23895 - if (copy_to_user(buf, contents, tmp - contents))
23896 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
23897 return -EFAULT;
23898
23899 *ppos = i;
23900 diff -urNp linux-3.0.4/drivers/char/random.c linux-3.0.4/drivers/char/random.c
23901 --- linux-3.0.4/drivers/char/random.c 2011-08-23 21:44:40.000000000 -0400
23902 +++ linux-3.0.4/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
23903 @@ -261,8 +261,13 @@
23904 /*
23905 * Configuration information
23906 */
23907 +#ifdef CONFIG_GRKERNSEC_RANDNET
23908 +#define INPUT_POOL_WORDS 512
23909 +#define OUTPUT_POOL_WORDS 128
23910 +#else
23911 #define INPUT_POOL_WORDS 128
23912 #define OUTPUT_POOL_WORDS 32
23913 +#endif
23914 #define SEC_XFER_SIZE 512
23915 #define EXTRACT_SIZE 10
23916
23917 @@ -300,10 +305,17 @@ static struct poolinfo {
23918 int poolwords;
23919 int tap1, tap2, tap3, tap4, tap5;
23920 } poolinfo_table[] = {
23921 +#ifdef CONFIG_GRKERNSEC_RANDNET
23922 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
23923 + { 512, 411, 308, 208, 104, 1 },
23924 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
23925 + { 128, 103, 76, 51, 25, 1 },
23926 +#else
23927 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
23928 { 128, 103, 76, 51, 25, 1 },
23929 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
23930 { 32, 26, 20, 14, 7, 1 },
23931 +#endif
23932 #if 0
23933 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
23934 { 2048, 1638, 1231, 819, 411, 1 },
23935 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
23936
23937 extract_buf(r, tmp);
23938 i = min_t(int, nbytes, EXTRACT_SIZE);
23939 - if (copy_to_user(buf, tmp, i)) {
23940 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
23941 ret = -EFAULT;
23942 break;
23943 }
23944 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
23945 #include <linux/sysctl.h>
23946
23947 static int min_read_thresh = 8, min_write_thresh;
23948 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
23949 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
23950 static int max_write_thresh = INPUT_POOL_WORDS * 32;
23951 static char sysctl_bootid[16];
23952
23953 diff -urNp linux-3.0.4/drivers/char/sonypi.c linux-3.0.4/drivers/char/sonypi.c
23954 --- linux-3.0.4/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
23955 +++ linux-3.0.4/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
23956 @@ -55,6 +55,7 @@
23957 #include <asm/uaccess.h>
23958 #include <asm/io.h>
23959 #include <asm/system.h>
23960 +#include <asm/local.h>
23961
23962 #include <linux/sonypi.h>
23963
23964 @@ -491,7 +492,7 @@ static struct sonypi_device {
23965 spinlock_t fifo_lock;
23966 wait_queue_head_t fifo_proc_list;
23967 struct fasync_struct *fifo_async;
23968 - int open_count;
23969 + local_t open_count;
23970 int model;
23971 struct input_dev *input_jog_dev;
23972 struct input_dev *input_key_dev;
23973 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
23974 static int sonypi_misc_release(struct inode *inode, struct file *file)
23975 {
23976 mutex_lock(&sonypi_device.lock);
23977 - sonypi_device.open_count--;
23978 + local_dec(&sonypi_device.open_count);
23979 mutex_unlock(&sonypi_device.lock);
23980 return 0;
23981 }
23982 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
23983 {
23984 mutex_lock(&sonypi_device.lock);
23985 /* Flush input queue on first open */
23986 - if (!sonypi_device.open_count)
23987 + if (!local_read(&sonypi_device.open_count))
23988 kfifo_reset(&sonypi_device.fifo);
23989 - sonypi_device.open_count++;
23990 + local_inc(&sonypi_device.open_count);
23991 mutex_unlock(&sonypi_device.lock);
23992
23993 return 0;
23994 diff -urNp linux-3.0.4/drivers/char/tpm/tpm_bios.c linux-3.0.4/drivers/char/tpm/tpm_bios.c
23995 --- linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
23996 +++ linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-08-23 21:47:55.000000000 -0400
23997 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
23998 event = addr;
23999
24000 if ((event->event_type == 0 && event->event_size == 0) ||
24001 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24002 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24003 return NULL;
24004
24005 return addr;
24006 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24007 return NULL;
24008
24009 if ((event->event_type == 0 && event->event_size == 0) ||
24010 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24011 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24012 return NULL;
24013
24014 (*pos)++;
24015 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24016 int i;
24017
24018 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24019 - seq_putc(m, data[i]);
24020 + if (!seq_putc(m, data[i]))
24021 + return -EFAULT;
24022
24023 return 0;
24024 }
24025 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24026 log->bios_event_log_end = log->bios_event_log + len;
24027
24028 virt = acpi_os_map_memory(start, len);
24029 + if (!virt) {
24030 + kfree(log->bios_event_log);
24031 + log->bios_event_log = NULL;
24032 + return -EFAULT;
24033 + }
24034
24035 memcpy(log->bios_event_log, virt, len);
24036
24037 diff -urNp linux-3.0.4/drivers/char/tpm/tpm.c linux-3.0.4/drivers/char/tpm/tpm.c
24038 --- linux-3.0.4/drivers/char/tpm/tpm.c 2011-07-21 22:17:23.000000000 -0400
24039 +++ linux-3.0.4/drivers/char/tpm/tpm.c 2011-08-23 21:48:14.000000000 -0400
24040 @@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24041 chip->vendor.req_complete_val)
24042 goto out_recv;
24043
24044 - if ((status == chip->vendor.req_canceled)) {
24045 + if (status == chip->vendor.req_canceled) {
24046 dev_err(chip->dev, "Operation Canceled\n");
24047 rc = -ECANCELED;
24048 goto out;
24049 @@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24050
24051 struct tpm_chip *chip = dev_get_drvdata(dev);
24052
24053 + pax_track_stack();
24054 +
24055 tpm_cmd.header.in = tpm_readpubek_header;
24056 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24057 "attempting to read the PUBEK");
24058 diff -urNp linux-3.0.4/drivers/crypto/hifn_795x.c linux-3.0.4/drivers/crypto/hifn_795x.c
24059 --- linux-3.0.4/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
24060 +++ linux-3.0.4/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
24061 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24062 0xCA, 0x34, 0x2B, 0x2E};
24063 struct scatterlist sg;
24064
24065 + pax_track_stack();
24066 +
24067 memset(src, 0, sizeof(src));
24068 memset(ctx.key, 0, sizeof(ctx.key));
24069
24070 diff -urNp linux-3.0.4/drivers/crypto/padlock-aes.c linux-3.0.4/drivers/crypto/padlock-aes.c
24071 --- linux-3.0.4/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
24072 +++ linux-3.0.4/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
24073 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24074 struct crypto_aes_ctx gen_aes;
24075 int cpu;
24076
24077 + pax_track_stack();
24078 +
24079 if (key_len % 8) {
24080 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24081 return -EINVAL;
24082 diff -urNp linux-3.0.4/drivers/edac/edac_pci_sysfs.c linux-3.0.4/drivers/edac/edac_pci_sysfs.c
24083 --- linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
24084 +++ linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
24085 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24086 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24087 static int edac_pci_poll_msec = 1000; /* one second workq period */
24088
24089 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
24090 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24091 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24092 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24093
24094 static struct kobject *edac_pci_top_main_kobj;
24095 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24096 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24097 edac_printk(KERN_CRIT, EDAC_PCI,
24098 "Signaled System Error on %s\n",
24099 pci_name(dev));
24100 - atomic_inc(&pci_nonparity_count);
24101 + atomic_inc_unchecked(&pci_nonparity_count);
24102 }
24103
24104 if (status & (PCI_STATUS_PARITY)) {
24105 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24106 "Master Data Parity Error on %s\n",
24107 pci_name(dev));
24108
24109 - atomic_inc(&pci_parity_count);
24110 + atomic_inc_unchecked(&pci_parity_count);
24111 }
24112
24113 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24114 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24115 "Detected Parity Error on %s\n",
24116 pci_name(dev));
24117
24118 - atomic_inc(&pci_parity_count);
24119 + atomic_inc_unchecked(&pci_parity_count);
24120 }
24121 }
24122
24123 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24124 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24125 "Signaled System Error on %s\n",
24126 pci_name(dev));
24127 - atomic_inc(&pci_nonparity_count);
24128 + atomic_inc_unchecked(&pci_nonparity_count);
24129 }
24130
24131 if (status & (PCI_STATUS_PARITY)) {
24132 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24133 "Master Data Parity Error on "
24134 "%s\n", pci_name(dev));
24135
24136 - atomic_inc(&pci_parity_count);
24137 + atomic_inc_unchecked(&pci_parity_count);
24138 }
24139
24140 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24141 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24142 "Detected Parity Error on %s\n",
24143 pci_name(dev));
24144
24145 - atomic_inc(&pci_parity_count);
24146 + atomic_inc_unchecked(&pci_parity_count);
24147 }
24148 }
24149 }
24150 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24151 if (!check_pci_errors)
24152 return;
24153
24154 - before_count = atomic_read(&pci_parity_count);
24155 + before_count = atomic_read_unchecked(&pci_parity_count);
24156
24157 /* scan all PCI devices looking for a Parity Error on devices and
24158 * bridges.
24159 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24160 /* Only if operator has selected panic on PCI Error */
24161 if (edac_pci_get_panic_on_pe()) {
24162 /* If the count is different 'after' from 'before' */
24163 - if (before_count != atomic_read(&pci_parity_count))
24164 + if (before_count != atomic_read_unchecked(&pci_parity_count))
24165 panic("EDAC: PCI Parity Error");
24166 }
24167 }
24168 diff -urNp linux-3.0.4/drivers/edac/mce_amd.h linux-3.0.4/drivers/edac/mce_amd.h
24169 --- linux-3.0.4/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
24170 +++ linux-3.0.4/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
24171 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
24172 bool (*dc_mce)(u16, u8);
24173 bool (*ic_mce)(u16, u8);
24174 bool (*nb_mce)(u16, u8);
24175 -};
24176 +} __no_const;
24177
24178 void amd_report_gart_errors(bool);
24179 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24180 diff -urNp linux-3.0.4/drivers/firewire/core-card.c linux-3.0.4/drivers/firewire/core-card.c
24181 --- linux-3.0.4/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
24182 +++ linux-3.0.4/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
24183 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
24184
24185 void fw_core_remove_card(struct fw_card *card)
24186 {
24187 - struct fw_card_driver dummy_driver = dummy_driver_template;
24188 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
24189
24190 card->driver->update_phy_reg(card, 4,
24191 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24192 diff -urNp linux-3.0.4/drivers/firewire/core-cdev.c linux-3.0.4/drivers/firewire/core-cdev.c
24193 --- linux-3.0.4/drivers/firewire/core-cdev.c 2011-08-23 21:44:40.000000000 -0400
24194 +++ linux-3.0.4/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
24195 @@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
24196 int ret;
24197
24198 if ((request->channels == 0 && request->bandwidth == 0) ||
24199 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24200 - request->bandwidth < 0)
24201 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24202 return -EINVAL;
24203
24204 r = kmalloc(sizeof(*r), GFP_KERNEL);
24205 diff -urNp linux-3.0.4/drivers/firewire/core.h linux-3.0.4/drivers/firewire/core.h
24206 --- linux-3.0.4/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
24207 +++ linux-3.0.4/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
24208 @@ -101,6 +101,7 @@ struct fw_card_driver {
24209
24210 int (*stop_iso)(struct fw_iso_context *ctx);
24211 };
24212 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24213
24214 void fw_card_initialize(struct fw_card *card,
24215 const struct fw_card_driver *driver, struct device *device);
24216 diff -urNp linux-3.0.4/drivers/firewire/core-transaction.c linux-3.0.4/drivers/firewire/core-transaction.c
24217 --- linux-3.0.4/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
24218 +++ linux-3.0.4/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
24219 @@ -37,6 +37,7 @@
24220 #include <linux/timer.h>
24221 #include <linux/types.h>
24222 #include <linux/workqueue.h>
24223 +#include <linux/sched.h>
24224
24225 #include <asm/byteorder.h>
24226
24227 @@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
24228 struct transaction_callback_data d;
24229 struct fw_transaction t;
24230
24231 + pax_track_stack();
24232 +
24233 init_timer_on_stack(&t.split_timeout_timer);
24234 init_completion(&d.done);
24235 d.payload = payload;
24236 diff -urNp linux-3.0.4/drivers/firmware/dmi_scan.c linux-3.0.4/drivers/firmware/dmi_scan.c
24237 --- linux-3.0.4/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
24238 +++ linux-3.0.4/drivers/firmware/dmi_scan.c 2011-08-23 21:47:55.000000000 -0400
24239 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24240 }
24241 }
24242 else {
24243 - /*
24244 - * no iounmap() for that ioremap(); it would be a no-op, but
24245 - * it's so early in setup that sucker gets confused into doing
24246 - * what it shouldn't if we actually call it.
24247 - */
24248 p = dmi_ioremap(0xF0000, 0x10000);
24249 if (p == NULL)
24250 goto error;
24251 diff -urNp linux-3.0.4/drivers/gpio/vr41xx_giu.c linux-3.0.4/drivers/gpio/vr41xx_giu.c
24252 --- linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
24253 +++ linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
24254 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24255 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24256 maskl, pendl, maskh, pendh);
24257
24258 - atomic_inc(&irq_err_count);
24259 + atomic_inc_unchecked(&irq_err_count);
24260
24261 return -EINVAL;
24262 }
24263 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c
24264 --- linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
24265 +++ linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
24266 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24267 struct drm_crtc *tmp;
24268 int crtc_mask = 1;
24269
24270 - WARN(!crtc, "checking null crtc?\n");
24271 + BUG_ON(!crtc);
24272
24273 dev = crtc->dev;
24274
24275 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24276 struct drm_encoder *encoder;
24277 bool ret = true;
24278
24279 + pax_track_stack();
24280 +
24281 crtc->enabled = drm_helper_crtc_in_use(crtc);
24282 if (!crtc->enabled)
24283 return true;
24284 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_drv.c linux-3.0.4/drivers/gpu/drm/drm_drv.c
24285 --- linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
24286 +++ linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-08-23 21:47:55.000000000 -0400
24287 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24288
24289 dev = file_priv->minor->dev;
24290 atomic_inc(&dev->ioctl_count);
24291 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24292 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24293 ++file_priv->ioctl_count;
24294
24295 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24296 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_fops.c linux-3.0.4/drivers/gpu/drm/drm_fops.c
24297 --- linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
24298 +++ linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
24299 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24300 }
24301
24302 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24303 - atomic_set(&dev->counts[i], 0);
24304 + atomic_set_unchecked(&dev->counts[i], 0);
24305
24306 dev->sigdata.lock = NULL;
24307
24308 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24309
24310 retcode = drm_open_helper(inode, filp, dev);
24311 if (!retcode) {
24312 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24313 - if (!dev->open_count++)
24314 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24315 + if (local_inc_return(&dev->open_count) == 1)
24316 retcode = drm_setup(dev);
24317 }
24318 if (!retcode) {
24319 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24320
24321 mutex_lock(&drm_global_mutex);
24322
24323 - DRM_DEBUG("open_count = %d\n", dev->open_count);
24324 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24325
24326 if (dev->driver->preclose)
24327 dev->driver->preclose(dev, file_priv);
24328 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24329 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24330 task_pid_nr(current),
24331 (long)old_encode_dev(file_priv->minor->device),
24332 - dev->open_count);
24333 + local_read(&dev->open_count));
24334
24335 /* if the master has gone away we can't do anything with the lock */
24336 if (file_priv->minor->master)
24337 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24338 * End inline drm_release
24339 */
24340
24341 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24342 - if (!--dev->open_count) {
24343 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24344 + if (local_dec_and_test(&dev->open_count)) {
24345 if (atomic_read(&dev->ioctl_count)) {
24346 DRM_ERROR("Device busy: %d\n",
24347 atomic_read(&dev->ioctl_count));
24348 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_global.c linux-3.0.4/drivers/gpu/drm/drm_global.c
24349 --- linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
24350 +++ linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
24351 @@ -36,7 +36,7 @@
24352 struct drm_global_item {
24353 struct mutex mutex;
24354 void *object;
24355 - int refcount;
24356 + atomic_t refcount;
24357 };
24358
24359 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24360 @@ -49,7 +49,7 @@ void drm_global_init(void)
24361 struct drm_global_item *item = &glob[i];
24362 mutex_init(&item->mutex);
24363 item->object = NULL;
24364 - item->refcount = 0;
24365 + atomic_set(&item->refcount, 0);
24366 }
24367 }
24368
24369 @@ -59,7 +59,7 @@ void drm_global_release(void)
24370 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24371 struct drm_global_item *item = &glob[i];
24372 BUG_ON(item->object != NULL);
24373 - BUG_ON(item->refcount != 0);
24374 + BUG_ON(atomic_read(&item->refcount) != 0);
24375 }
24376 }
24377
24378 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24379 void *object;
24380
24381 mutex_lock(&item->mutex);
24382 - if (item->refcount == 0) {
24383 + if (atomic_read(&item->refcount) == 0) {
24384 item->object = kzalloc(ref->size, GFP_KERNEL);
24385 if (unlikely(item->object == NULL)) {
24386 ret = -ENOMEM;
24387 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24388 goto out_err;
24389
24390 }
24391 - ++item->refcount;
24392 + atomic_inc(&item->refcount);
24393 ref->object = item->object;
24394 object = item->object;
24395 mutex_unlock(&item->mutex);
24396 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24397 struct drm_global_item *item = &glob[ref->global_type];
24398
24399 mutex_lock(&item->mutex);
24400 - BUG_ON(item->refcount == 0);
24401 + BUG_ON(atomic_read(&item->refcount) == 0);
24402 BUG_ON(ref->object != item->object);
24403 - if (--item->refcount == 0) {
24404 + if (atomic_dec_and_test(&item->refcount)) {
24405 ref->release(ref);
24406 item->object = NULL;
24407 }
24408 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_info.c linux-3.0.4/drivers/gpu/drm/drm_info.c
24409 --- linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
24410 +++ linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
24411 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24412 struct drm_local_map *map;
24413 struct drm_map_list *r_list;
24414
24415 - /* Hardcoded from _DRM_FRAME_BUFFER,
24416 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24417 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24418 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24419 + static const char * const types[] = {
24420 + [_DRM_FRAME_BUFFER] = "FB",
24421 + [_DRM_REGISTERS] = "REG",
24422 + [_DRM_SHM] = "SHM",
24423 + [_DRM_AGP] = "AGP",
24424 + [_DRM_SCATTER_GATHER] = "SG",
24425 + [_DRM_CONSISTENT] = "PCI",
24426 + [_DRM_GEM] = "GEM" };
24427 const char *type;
24428 int i;
24429
24430 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24431 map = r_list->map;
24432 if (!map)
24433 continue;
24434 - if (map->type < 0 || map->type > 5)
24435 + if (map->type >= ARRAY_SIZE(types))
24436 type = "??";
24437 else
24438 type = types[map->type];
24439 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24440 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24441 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24442 vma->vm_flags & VM_IO ? 'i' : '-',
24443 +#ifdef CONFIG_GRKERNSEC_HIDESYM
24444 + 0);
24445 +#else
24446 vma->vm_pgoff);
24447 +#endif
24448
24449 #if defined(__i386__)
24450 pgprot = pgprot_val(vma->vm_page_prot);
24451 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_ioctl.c linux-3.0.4/drivers/gpu/drm/drm_ioctl.c
24452 --- linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
24453 +++ linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
24454 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24455 stats->data[i].value =
24456 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24457 else
24458 - stats->data[i].value = atomic_read(&dev->counts[i]);
24459 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24460 stats->data[i].type = dev->types[i];
24461 }
24462
24463 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_lock.c linux-3.0.4/drivers/gpu/drm/drm_lock.c
24464 --- linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
24465 +++ linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
24466 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24467 if (drm_lock_take(&master->lock, lock->context)) {
24468 master->lock.file_priv = file_priv;
24469 master->lock.lock_time = jiffies;
24470 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24471 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24472 break; /* Got lock */
24473 }
24474
24475 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24476 return -EINVAL;
24477 }
24478
24479 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24480 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24481
24482 if (drm_lock_free(&master->lock, lock->context)) {
24483 /* FIXME: Should really bail out here. */
24484 diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c
24485 --- linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
24486 +++ linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
24487 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24488 dma->buflist[vertex->idx],
24489 vertex->discard, vertex->used);
24490
24491 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24492 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24493 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24494 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24495 sarea_priv->last_enqueue = dev_priv->counter - 1;
24496 sarea_priv->last_dispatch = (int)hw_status[5];
24497
24498 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24499 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24500 mc->last_render);
24501
24502 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24503 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24504 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24505 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24506 sarea_priv->last_enqueue = dev_priv->counter - 1;
24507 sarea_priv->last_dispatch = (int)hw_status[5];
24508
24509 diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h
24510 --- linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
24511 +++ linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
24512 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24513 int page_flipping;
24514
24515 wait_queue_head_t irq_queue;
24516 - atomic_t irq_received;
24517 - atomic_t irq_emitted;
24518 + atomic_unchecked_t irq_received;
24519 + atomic_unchecked_t irq_emitted;
24520
24521 int front_offset;
24522 } drm_i810_private_t;
24523 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c
24524 --- linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
24525 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-23 21:47:55.000000000 -0400
24526 @@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
24527 I915_READ(GTIMR));
24528 }
24529 seq_printf(m, "Interrupts received: %d\n",
24530 - atomic_read(&dev_priv->irq_received));
24531 + atomic_read_unchecked(&dev_priv->irq_received));
24532 for (i = 0; i < I915_NUM_RINGS; i++) {
24533 if (IS_GEN6(dev)) {
24534 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24535 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c
24536 --- linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:44:40.000000000 -0400
24537 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
24538 @@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
24539 bool can_switch;
24540
24541 spin_lock(&dev->count_lock);
24542 - can_switch = (dev->open_count == 0);
24543 + can_switch = (local_read(&dev->open_count) == 0);
24544 spin_unlock(&dev->count_lock);
24545 return can_switch;
24546 }
24547 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h
24548 --- linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
24549 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
24550 @@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
24551 /* render clock increase/decrease */
24552 /* display clock increase/decrease */
24553 /* pll clock increase/decrease */
24554 -};
24555 +} __no_const;
24556
24557 struct intel_device_info {
24558 u8 gen;
24559 @@ -300,7 +300,7 @@ typedef struct drm_i915_private {
24560 int current_page;
24561 int page_flipping;
24562
24563 - atomic_t irq_received;
24564 + atomic_unchecked_t irq_received;
24565
24566 /* protects the irq masks */
24567 spinlock_t irq_lock;
24568 @@ -874,7 +874,7 @@ struct drm_i915_gem_object {
24569 * will be page flipped away on the next vblank. When it
24570 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24571 */
24572 - atomic_t pending_flip;
24573 + atomic_unchecked_t pending_flip;
24574 };
24575
24576 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24577 @@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
24578 extern void intel_teardown_gmbus(struct drm_device *dev);
24579 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24580 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24581 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24582 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24583 {
24584 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24585 }
24586 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24587 --- linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
24588 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
24589 @@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
24590 i915_gem_clflush_object(obj);
24591
24592 if (obj->base.pending_write_domain)
24593 - cd->flips |= atomic_read(&obj->pending_flip);
24594 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24595
24596 /* The actual obj->write_domain will be updated with
24597 * pending_write_domain after we emit the accumulated flush for all
24598 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c
24599 --- linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:44:40.000000000 -0400
24600 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
24601 @@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
24602 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
24603 struct drm_i915_master_private *master_priv;
24604
24605 - atomic_inc(&dev_priv->irq_received);
24606 + atomic_inc_unchecked(&dev_priv->irq_received);
24607
24608 /* disable master interrupt before clearing iir */
24609 de_ier = I915_READ(DEIER);
24610 @@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
24611 struct drm_i915_master_private *master_priv;
24612 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
24613
24614 - atomic_inc(&dev_priv->irq_received);
24615 + atomic_inc_unchecked(&dev_priv->irq_received);
24616
24617 if (IS_GEN6(dev))
24618 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
24619 @@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
24620 int ret = IRQ_NONE, pipe;
24621 bool blc_event = false;
24622
24623 - atomic_inc(&dev_priv->irq_received);
24624 + atomic_inc_unchecked(&dev_priv->irq_received);
24625
24626 iir = I915_READ(IIR);
24627
24628 @@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
24629 {
24630 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24631
24632 - atomic_set(&dev_priv->irq_received, 0);
24633 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24634
24635 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24636 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24637 @@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
24638 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24639 int pipe;
24640
24641 - atomic_set(&dev_priv->irq_received, 0);
24642 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24643
24644 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24645 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24646 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/intel_display.c linux-3.0.4/drivers/gpu/drm/i915/intel_display.c
24647 --- linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:44:40.000000000 -0400
24648 +++ linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
24649 @@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24650
24651 wait_event(dev_priv->pending_flip_queue,
24652 atomic_read(&dev_priv->mm.wedged) ||
24653 - atomic_read(&obj->pending_flip) == 0);
24654 + atomic_read_unchecked(&obj->pending_flip) == 0);
24655
24656 /* Big Hammer, we also need to ensure that any pending
24657 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24658 @@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
24659 obj = to_intel_framebuffer(crtc->fb)->obj;
24660 dev_priv = crtc->dev->dev_private;
24661 wait_event(dev_priv->pending_flip_queue,
24662 - atomic_read(&obj->pending_flip) == 0);
24663 + atomic_read_unchecked(&obj->pending_flip) == 0);
24664 }
24665
24666 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24667 @@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
24668
24669 atomic_clear_mask(1 << intel_crtc->plane,
24670 &obj->pending_flip.counter);
24671 - if (atomic_read(&obj->pending_flip) == 0)
24672 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
24673 wake_up(&dev_priv->pending_flip_queue);
24674
24675 schedule_work(&work->work);
24676 @@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
24677 /* Block clients from rendering to the new back buffer until
24678 * the flip occurs and the object is no longer visible.
24679 */
24680 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24681 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24682
24683 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
24684 if (ret)
24685 @@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
24686 return 0;
24687
24688 cleanup_pending:
24689 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24690 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24691 cleanup_objs:
24692 drm_gem_object_unreference(&work->old_fb_obj->base);
24693 drm_gem_object_unreference(&obj->base);
24694 diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h
24695 --- linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
24696 +++ linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
24697 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24698 u32 clear_cmd;
24699 u32 maccess;
24700
24701 - atomic_t vbl_received; /**< Number of vblanks received. */
24702 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24703 wait_queue_head_t fence_queue;
24704 - atomic_t last_fence_retired;
24705 + atomic_unchecked_t last_fence_retired;
24706 u32 next_fence_to_post;
24707
24708 unsigned int fb_cpp;
24709 diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c
24710 --- linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
24711 +++ linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
24712 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24713 if (crtc != 0)
24714 return 0;
24715
24716 - return atomic_read(&dev_priv->vbl_received);
24717 + return atomic_read_unchecked(&dev_priv->vbl_received);
24718 }
24719
24720
24721 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24722 /* VBLANK interrupt */
24723 if (status & MGA_VLINEPEN) {
24724 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24725 - atomic_inc(&dev_priv->vbl_received);
24726 + atomic_inc_unchecked(&dev_priv->vbl_received);
24727 drm_handle_vblank(dev, 0);
24728 handled = 1;
24729 }
24730 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24731 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24732 MGA_WRITE(MGA_PRIMEND, prim_end);
24733
24734 - atomic_inc(&dev_priv->last_fence_retired);
24735 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
24736 DRM_WAKEUP(&dev_priv->fence_queue);
24737 handled = 1;
24738 }
24739 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24740 * using fences.
24741 */
24742 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24743 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24744 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24745 - *sequence) <= (1 << 23)));
24746
24747 *sequence = cur_fence;
24748 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c
24749 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
24750 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
24751 @@ -200,7 +200,7 @@ struct methods {
24752 const char desc[8];
24753 void (*loadbios)(struct drm_device *, uint8_t *);
24754 const bool rw;
24755 -};
24756 +} __do_const;
24757
24758 static struct methods shadow_methods[] = {
24759 { "PRAMIN", load_vbios_pramin, true },
24760 @@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
24761 struct bit_table {
24762 const char id;
24763 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
24764 -};
24765 +} __no_const;
24766
24767 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
24768
24769 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h
24770 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
24771 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
24772 @@ -227,7 +227,7 @@ struct nouveau_channel {
24773 struct list_head pending;
24774 uint32_t sequence;
24775 uint32_t sequence_ack;
24776 - atomic_t last_sequence_irq;
24777 + atomic_unchecked_t last_sequence_irq;
24778 } fence;
24779
24780 /* DMA push buffer */
24781 @@ -304,7 +304,7 @@ struct nouveau_exec_engine {
24782 u32 handle, u16 class);
24783 void (*set_tile_region)(struct drm_device *dev, int i);
24784 void (*tlb_flush)(struct drm_device *, int engine);
24785 -};
24786 +} __no_const;
24787
24788 struct nouveau_instmem_engine {
24789 void *priv;
24790 @@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
24791 struct nouveau_mc_engine {
24792 int (*init)(struct drm_device *dev);
24793 void (*takedown)(struct drm_device *dev);
24794 -};
24795 +} __no_const;
24796
24797 struct nouveau_timer_engine {
24798 int (*init)(struct drm_device *dev);
24799 void (*takedown)(struct drm_device *dev);
24800 uint64_t (*read)(struct drm_device *dev);
24801 -};
24802 +} __no_const;
24803
24804 struct nouveau_fb_engine {
24805 int num_tiles;
24806 @@ -494,7 +494,7 @@ struct nouveau_vram_engine {
24807 void (*put)(struct drm_device *, struct nouveau_mem **);
24808
24809 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24810 -};
24811 +} __no_const;
24812
24813 struct nouveau_engine {
24814 struct nouveau_instmem_engine instmem;
24815 @@ -640,7 +640,7 @@ struct drm_nouveau_private {
24816 struct drm_global_reference mem_global_ref;
24817 struct ttm_bo_global_ref bo_global_ref;
24818 struct ttm_bo_device bdev;
24819 - atomic_t validate_sequence;
24820 + atomic_unchecked_t validate_sequence;
24821 } ttm;
24822
24823 struct {
24824 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c
24825 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
24826 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
24827 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24828 if (USE_REFCNT(dev))
24829 sequence = nvchan_rd32(chan, 0x48);
24830 else
24831 - sequence = atomic_read(&chan->fence.last_sequence_irq);
24832 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24833
24834 if (chan->fence.sequence_ack == sequence)
24835 goto out;
24836 @@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
24837
24838 INIT_LIST_HEAD(&chan->fence.pending);
24839 spin_lock_init(&chan->fence.lock);
24840 - atomic_set(&chan->fence.last_sequence_irq, 0);
24841 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24842 return 0;
24843 }
24844
24845 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c
24846 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
24847 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
24848 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24849 int trycnt = 0;
24850 int ret, i;
24851
24852 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24853 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24854 retry:
24855 if (++trycnt > 100000) {
24856 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24857 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c
24858 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
24859 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
24860 @@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
24861 bool can_switch;
24862
24863 spin_lock(&dev->count_lock);
24864 - can_switch = (dev->open_count == 0);
24865 + can_switch = (local_read(&dev->open_count) == 0);
24866 spin_unlock(&dev->count_lock);
24867 return can_switch;
24868 }
24869 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c
24870 --- linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
24871 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
24872 @@ -560,7 +560,7 @@ static int
24873 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24874 u32 class, u32 mthd, u32 data)
24875 {
24876 - atomic_set(&chan->fence.last_sequence_irq, data);
24877 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24878 return 0;
24879 }
24880
24881 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c
24882 --- linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
24883 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
24884 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24885
24886 /* GH: Simple idle check.
24887 */
24888 - atomic_set(&dev_priv->idle_count, 0);
24889 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24890
24891 /* We don't support anything other than bus-mastering ring mode,
24892 * but the ring can be in either AGP or PCI space for the ring
24893 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h
24894 --- linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
24895 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
24896 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24897 int is_pci;
24898 unsigned long cce_buffers_offset;
24899
24900 - atomic_t idle_count;
24901 + atomic_unchecked_t idle_count;
24902
24903 int page_flipping;
24904 int current_page;
24905 u32 crtc_offset;
24906 u32 crtc_offset_cntl;
24907
24908 - atomic_t vbl_received;
24909 + atomic_unchecked_t vbl_received;
24910
24911 u32 color_fmt;
24912 unsigned int front_offset;
24913 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c
24914 --- linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
24915 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
24916 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24917 if (crtc != 0)
24918 return 0;
24919
24920 - return atomic_read(&dev_priv->vbl_received);
24921 + return atomic_read_unchecked(&dev_priv->vbl_received);
24922 }
24923
24924 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24925 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24926 /* VBLANK interrupt */
24927 if (status & R128_CRTC_VBLANK_INT) {
24928 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24929 - atomic_inc(&dev_priv->vbl_received);
24930 + atomic_inc_unchecked(&dev_priv->vbl_received);
24931 drm_handle_vblank(dev, 0);
24932 return IRQ_HANDLED;
24933 }
24934 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_state.c linux-3.0.4/drivers/gpu/drm/r128/r128_state.c
24935 --- linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
24936 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
24937 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
24938
24939 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
24940 {
24941 - if (atomic_read(&dev_priv->idle_count) == 0)
24942 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
24943 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
24944 else
24945 - atomic_set(&dev_priv->idle_count, 0);
24946 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24947 }
24948
24949 #endif
24950 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/atom.c linux-3.0.4/drivers/gpu/drm/radeon/atom.c
24951 --- linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
24952 +++ linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
24953 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
24954 char name[512];
24955 int i;
24956
24957 + pax_track_stack();
24958 +
24959 ctx->card = card;
24960 ctx->bios = bios;
24961
24962 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c
24963 --- linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
24964 +++ linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
24965 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
24966 regex_t mask_rex;
24967 regmatch_t match[4];
24968 char buf[1024];
24969 - size_t end;
24970 + long end;
24971 int len;
24972 int done = 0;
24973 int r;
24974 unsigned o;
24975 struct offset *offset;
24976 char last_reg_s[10];
24977 - int last_reg;
24978 + unsigned long last_reg;
24979
24980 if (regcomp
24981 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
24982 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c
24983 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
24984 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
24985 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
24986 struct radeon_gpio_rec gpio;
24987 struct radeon_hpd hpd;
24988
24989 + pax_track_stack();
24990 +
24991 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
24992 return false;
24993
24994 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c
24995 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:44:40.000000000 -0400
24996 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
24997 @@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
24998 bool can_switch;
24999
25000 spin_lock(&dev->count_lock);
25001 - can_switch = (dev->open_count == 0);
25002 + can_switch = (local_read(&dev->open_count) == 0);
25003 spin_unlock(&dev->count_lock);
25004 return can_switch;
25005 }
25006 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c
25007 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:44:40.000000000 -0400
25008 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
25009 @@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
25010 uint32_t post_div;
25011 u32 pll_out_min, pll_out_max;
25012
25013 + pax_track_stack();
25014 +
25015 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25016 freq = freq * 1000;
25017
25018 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h
25019 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
25020 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
25021 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25022
25023 /* SW interrupt */
25024 wait_queue_head_t swi_queue;
25025 - atomic_t swi_emitted;
25026 + atomic_unchecked_t swi_emitted;
25027 int vblank_crtc;
25028 uint32_t irq_enable_reg;
25029 uint32_t r500_disp_irq_reg;
25030 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c
25031 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
25032 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
25033 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
25034 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25035 return 0;
25036 }
25037 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25038 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25039 if (!rdev->cp.ready)
25040 /* FIXME: cp is not running assume everythings is done right
25041 * away
25042 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
25043 return r;
25044 }
25045 radeon_fence_write(rdev, 0);
25046 - atomic_set(&rdev->fence_drv.seq, 0);
25047 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25048 INIT_LIST_HEAD(&rdev->fence_drv.created);
25049 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25050 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25051 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon.h linux-3.0.4/drivers/gpu/drm/radeon/radeon.h
25052 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
25053 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
25054 @@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
25055 */
25056 struct radeon_fence_driver {
25057 uint32_t scratch_reg;
25058 - atomic_t seq;
25059 + atomic_unchecked_t seq;
25060 uint32_t last_seq;
25061 unsigned long last_jiffies;
25062 unsigned long last_timeout;
25063 @@ -960,7 +960,7 @@ struct radeon_asic {
25064 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25065 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25066 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25067 -};
25068 +} __no_const;
25069
25070 /*
25071 * Asic structures
25072 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25073 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25074 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
25075 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25076 request = compat_alloc_user_space(sizeof(*request));
25077 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25078 || __put_user(req32.param, &request->param)
25079 - || __put_user((void __user *)(unsigned long)req32.value,
25080 + || __put_user((unsigned long)req32.value,
25081 &request->value))
25082 return -EFAULT;
25083
25084 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c
25085 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
25086 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
25087 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25088 unsigned int ret;
25089 RING_LOCALS;
25090
25091 - atomic_inc(&dev_priv->swi_emitted);
25092 - ret = atomic_read(&dev_priv->swi_emitted);
25093 + atomic_inc_unchecked(&dev_priv->swi_emitted);
25094 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25095
25096 BEGIN_RING(4);
25097 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25098 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25099 drm_radeon_private_t *dev_priv =
25100 (drm_radeon_private_t *) dev->dev_private;
25101
25102 - atomic_set(&dev_priv->swi_emitted, 0);
25103 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25104 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25105
25106 dev->max_vblank_count = 0x001fffff;
25107 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c
25108 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
25109 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
25110 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25111 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25112 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25113
25114 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25115 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25116 sarea_priv->nbox * sizeof(depth_boxes[0])))
25117 return -EFAULT;
25118
25119 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25120 {
25121 drm_radeon_private_t *dev_priv = dev->dev_private;
25122 drm_radeon_getparam_t *param = data;
25123 - int value;
25124 + int value = 0;
25125
25126 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25127
25128 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c
25129 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-07-21 22:17:23.000000000 -0400
25130 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-23 21:47:55.000000000 -0400
25131 @@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25132 }
25133 if (unlikely(ttm_vm_ops == NULL)) {
25134 ttm_vm_ops = vma->vm_ops;
25135 - radeon_ttm_vm_ops = *ttm_vm_ops;
25136 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25137 + pax_open_kernel();
25138 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25139 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25140 + pax_close_kernel();
25141 }
25142 vma->vm_ops = &radeon_ttm_vm_ops;
25143 return 0;
25144 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/rs690.c linux-3.0.4/drivers/gpu/drm/radeon/rs690.c
25145 --- linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
25146 +++ linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
25147 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25148 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25149 rdev->pm.sideport_bandwidth.full)
25150 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25151 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25152 + read_delay_latency.full = dfixed_const(800 * 1000);
25153 read_delay_latency.full = dfixed_div(read_delay_latency,
25154 rdev->pm.igp_sideport_mclk);
25155 + a.full = dfixed_const(370);
25156 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25157 } else {
25158 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25159 rdev->pm.k8_bandwidth.full)
25160 diff -urNp linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25161 --- linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
25162 +++ linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
25163 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
25164 static int ttm_pool_mm_shrink(struct shrinker *shrink,
25165 struct shrink_control *sc)
25166 {
25167 - static atomic_t start_pool = ATOMIC_INIT(0);
25168 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25169 unsigned i;
25170 - unsigned pool_offset = atomic_add_return(1, &start_pool);
25171 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25172 struct ttm_page_pool *pool;
25173 int shrink_pages = sc->nr_to_scan;
25174
25175 diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_drv.h linux-3.0.4/drivers/gpu/drm/via/via_drv.h
25176 --- linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
25177 +++ linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
25178 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25179 typedef uint32_t maskarray_t[5];
25180
25181 typedef struct drm_via_irq {
25182 - atomic_t irq_received;
25183 + atomic_unchecked_t irq_received;
25184 uint32_t pending_mask;
25185 uint32_t enable_mask;
25186 wait_queue_head_t irq_queue;
25187 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
25188 struct timeval last_vblank;
25189 int last_vblank_valid;
25190 unsigned usec_per_vblank;
25191 - atomic_t vbl_received;
25192 + atomic_unchecked_t vbl_received;
25193 drm_via_state_t hc_state;
25194 char pci_buf[VIA_PCI_BUF_SIZE];
25195 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25196 diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_irq.c linux-3.0.4/drivers/gpu/drm/via/via_irq.c
25197 --- linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
25198 +++ linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
25199 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25200 if (crtc != 0)
25201 return 0;
25202
25203 - return atomic_read(&dev_priv->vbl_received);
25204 + return atomic_read_unchecked(&dev_priv->vbl_received);
25205 }
25206
25207 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25208 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25209
25210 status = VIA_READ(VIA_REG_INTERRUPT);
25211 if (status & VIA_IRQ_VBLANK_PENDING) {
25212 - atomic_inc(&dev_priv->vbl_received);
25213 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25214 + atomic_inc_unchecked(&dev_priv->vbl_received);
25215 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25216 do_gettimeofday(&cur_vblank);
25217 if (dev_priv->last_vblank_valid) {
25218 dev_priv->usec_per_vblank =
25219 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25220 dev_priv->last_vblank = cur_vblank;
25221 dev_priv->last_vblank_valid = 1;
25222 }
25223 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25224 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25225 DRM_DEBUG("US per vblank is: %u\n",
25226 dev_priv->usec_per_vblank);
25227 }
25228 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25229
25230 for (i = 0; i < dev_priv->num_irqs; ++i) {
25231 if (status & cur_irq->pending_mask) {
25232 - atomic_inc(&cur_irq->irq_received);
25233 + atomic_inc_unchecked(&cur_irq->irq_received);
25234 DRM_WAKEUP(&cur_irq->irq_queue);
25235 handled = 1;
25236 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25237 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25238 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25239 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25240 masks[irq][4]));
25241 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25242 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25243 } else {
25244 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25245 (((cur_irq_sequence =
25246 - atomic_read(&cur_irq->irq_received)) -
25247 + atomic_read_unchecked(&cur_irq->irq_received)) -
25248 *sequence) <= (1 << 23)));
25249 }
25250 *sequence = cur_irq_sequence;
25251 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25252 }
25253
25254 for (i = 0; i < dev_priv->num_irqs; ++i) {
25255 - atomic_set(&cur_irq->irq_received, 0);
25256 + atomic_set_unchecked(&cur_irq->irq_received, 0);
25257 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25258 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25259 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25260 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25261 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25262 case VIA_IRQ_RELATIVE:
25263 irqwait->request.sequence +=
25264 - atomic_read(&cur_irq->irq_received);
25265 + atomic_read_unchecked(&cur_irq->irq_received);
25266 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25267 case VIA_IRQ_ABSOLUTE:
25268 break;
25269 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25270 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
25271 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
25272 @@ -240,7 +240,7 @@ struct vmw_private {
25273 * Fencing and IRQs.
25274 */
25275
25276 - atomic_t fence_seq;
25277 + atomic_unchecked_t fence_seq;
25278 wait_queue_head_t fence_queue;
25279 wait_queue_head_t fifo_queue;
25280 atomic_t fence_queue_waiters;
25281 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25282 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
25283 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
25284 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25285 while (!vmw_lag_lt(queue, us)) {
25286 spin_lock(&queue->lock);
25287 if (list_empty(&queue->head))
25288 - sequence = atomic_read(&dev_priv->fence_seq);
25289 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25290 else {
25291 fence = list_first_entry(&queue->head,
25292 struct vmw_fence, head);
25293 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25294 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
25295 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-23 21:47:55.000000000 -0400
25296 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25297 (unsigned int) min,
25298 (unsigned int) fifo->capabilities);
25299
25300 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25301 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25302 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25303 vmw_fence_queue_init(&fifo->fence_queue);
25304 return vmw_fifo_send_fence(dev_priv, &dummy);
25305 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25306
25307 fm = vmw_fifo_reserve(dev_priv, bytes);
25308 if (unlikely(fm == NULL)) {
25309 - *sequence = atomic_read(&dev_priv->fence_seq);
25310 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25311 ret = -ENOMEM;
25312 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25313 false, 3*HZ);
25314 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25315 }
25316
25317 do {
25318 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25319 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25320 } while (*sequence == 0);
25321
25322 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25323 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25324 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
25325 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
25326 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25327 * emitted. Then the fence is stale and signaled.
25328 */
25329
25330 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25331 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25332 > VMW_FENCE_WRAP);
25333
25334 return ret;
25335 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25336
25337 if (fifo_idle)
25338 down_read(&fifo_state->rwsem);
25339 - signal_seq = atomic_read(&dev_priv->fence_seq);
25340 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25341 ret = 0;
25342
25343 for (;;) {
25344 diff -urNp linux-3.0.4/drivers/hid/hid-core.c linux-3.0.4/drivers/hid/hid-core.c
25345 --- linux-3.0.4/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
25346 +++ linux-3.0.4/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
25347 @@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
25348
25349 int hid_add_device(struct hid_device *hdev)
25350 {
25351 - static atomic_t id = ATOMIC_INIT(0);
25352 + static atomic_unchecked_t id = ATOMIC_INIT(0);
25353 int ret;
25354
25355 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25356 @@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
25357 /* XXX hack, any other cleaner solution after the driver core
25358 * is converted to allow more than 20 bytes as the device name? */
25359 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25360 - hdev->vendor, hdev->product, atomic_inc_return(&id));
25361 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25362
25363 hid_debug_register(hdev, dev_name(&hdev->dev));
25364 ret = device_add(&hdev->dev);
25365 diff -urNp linux-3.0.4/drivers/hid/usbhid/hiddev.c linux-3.0.4/drivers/hid/usbhid/hiddev.c
25366 --- linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
25367 +++ linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
25368 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
25369 break;
25370
25371 case HIDIOCAPPLICATION:
25372 - if (arg < 0 || arg >= hid->maxapplication)
25373 + if (arg >= hid->maxapplication)
25374 break;
25375
25376 for (i = 0; i < hid->maxcollection; i++)
25377 diff -urNp linux-3.0.4/drivers/hwmon/acpi_power_meter.c linux-3.0.4/drivers/hwmon/acpi_power_meter.c
25378 --- linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
25379 +++ linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
25380 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
25381 return res;
25382
25383 temp /= 1000;
25384 - if (temp < 0)
25385 - return -EINVAL;
25386
25387 mutex_lock(&resource->lock);
25388 resource->trip[attr->index - 7] = temp;
25389 diff -urNp linux-3.0.4/drivers/hwmon/sht15.c linux-3.0.4/drivers/hwmon/sht15.c
25390 --- linux-3.0.4/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
25391 +++ linux-3.0.4/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
25392 @@ -166,7 +166,7 @@ struct sht15_data {
25393 int supply_uV;
25394 bool supply_uV_valid;
25395 struct work_struct update_supply_work;
25396 - atomic_t interrupt_handled;
25397 + atomic_unchecked_t interrupt_handled;
25398 };
25399
25400 /**
25401 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
25402 return ret;
25403
25404 gpio_direction_input(data->pdata->gpio_data);
25405 - atomic_set(&data->interrupt_handled, 0);
25406 + atomic_set_unchecked(&data->interrupt_handled, 0);
25407
25408 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25409 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25410 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25411 /* Only relevant if the interrupt hasn't occurred. */
25412 - if (!atomic_read(&data->interrupt_handled))
25413 + if (!atomic_read_unchecked(&data->interrupt_handled))
25414 schedule_work(&data->read_work);
25415 }
25416 ret = wait_event_timeout(data->wait_queue,
25417 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
25418
25419 /* First disable the interrupt */
25420 disable_irq_nosync(irq);
25421 - atomic_inc(&data->interrupt_handled);
25422 + atomic_inc_unchecked(&data->interrupt_handled);
25423 /* Then schedule a reading work struct */
25424 if (data->state != SHT15_READING_NOTHING)
25425 schedule_work(&data->read_work);
25426 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
25427 * If not, then start the interrupt again - care here as could
25428 * have gone low in meantime so verify it hasn't!
25429 */
25430 - atomic_set(&data->interrupt_handled, 0);
25431 + atomic_set_unchecked(&data->interrupt_handled, 0);
25432 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25433 /* If still not occurred or another handler has been scheduled */
25434 if (gpio_get_value(data->pdata->gpio_data)
25435 - || atomic_read(&data->interrupt_handled))
25436 + || atomic_read_unchecked(&data->interrupt_handled))
25437 return;
25438 }
25439
25440 diff -urNp linux-3.0.4/drivers/hwmon/w83791d.c linux-3.0.4/drivers/hwmon/w83791d.c
25441 --- linux-3.0.4/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
25442 +++ linux-3.0.4/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
25443 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25444 struct i2c_board_info *info);
25445 static int w83791d_remove(struct i2c_client *client);
25446
25447 -static int w83791d_read(struct i2c_client *client, u8 register);
25448 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25449 +static int w83791d_read(struct i2c_client *client, u8 reg);
25450 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25451 static struct w83791d_data *w83791d_update_device(struct device *dev);
25452
25453 #ifdef DEBUG
25454 diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c
25455 --- linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
25456 +++ linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
25457 @@ -43,7 +43,7 @@
25458 extern struct i2c_adapter amd756_smbus;
25459
25460 static struct i2c_adapter *s4882_adapter;
25461 -static struct i2c_algorithm *s4882_algo;
25462 +static i2c_algorithm_no_const *s4882_algo;
25463
25464 /* Wrapper access functions for multiplexed SMBus */
25465 static DEFINE_MUTEX(amd756_lock);
25466 diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c
25467 --- linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
25468 +++ linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
25469 @@ -41,7 +41,7 @@
25470 extern struct i2c_adapter *nforce2_smbus;
25471
25472 static struct i2c_adapter *s4985_adapter;
25473 -static struct i2c_algorithm *s4985_algo;
25474 +static i2c_algorithm_no_const *s4985_algo;
25475
25476 /* Wrapper access functions for multiplexed SMBus */
25477 static DEFINE_MUTEX(nforce2_lock);
25478 diff -urNp linux-3.0.4/drivers/i2c/i2c-mux.c linux-3.0.4/drivers/i2c/i2c-mux.c
25479 --- linux-3.0.4/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
25480 +++ linux-3.0.4/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
25481 @@ -28,7 +28,7 @@
25482 /* multiplexer per channel data */
25483 struct i2c_mux_priv {
25484 struct i2c_adapter adap;
25485 - struct i2c_algorithm algo;
25486 + i2c_algorithm_no_const algo;
25487
25488 struct i2c_adapter *parent;
25489 void *mux_dev; /* the mux chip/device */
25490 diff -urNp linux-3.0.4/drivers/ide/ide-cd.c linux-3.0.4/drivers/ide/ide-cd.c
25491 --- linux-3.0.4/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
25492 +++ linux-3.0.4/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
25493 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25494 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25495 if ((unsigned long)buf & alignment
25496 || blk_rq_bytes(rq) & q->dma_pad_mask
25497 - || object_is_on_stack(buf))
25498 + || object_starts_on_stack(buf))
25499 drive->dma = 0;
25500 }
25501 }
25502 diff -urNp linux-3.0.4/drivers/ide/ide-floppy.c linux-3.0.4/drivers/ide/ide-floppy.c
25503 --- linux-3.0.4/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
25504 +++ linux-3.0.4/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
25505 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25506 u8 pc_buf[256], header_len, desc_cnt;
25507 int i, rc = 1, blocks, length;
25508
25509 + pax_track_stack();
25510 +
25511 ide_debug_log(IDE_DBG_FUNC, "enter");
25512
25513 drive->bios_cyl = 0;
25514 diff -urNp linux-3.0.4/drivers/ide/setup-pci.c linux-3.0.4/drivers/ide/setup-pci.c
25515 --- linux-3.0.4/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
25516 +++ linux-3.0.4/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
25517 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25518 int ret, i, n_ports = dev2 ? 4 : 2;
25519 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25520
25521 + pax_track_stack();
25522 +
25523 for (i = 0; i < n_ports / 2; i++) {
25524 ret = ide_setup_pci_controller(pdev[i], d, !i);
25525 if (ret < 0)
25526 diff -urNp linux-3.0.4/drivers/infiniband/core/cm.c linux-3.0.4/drivers/infiniband/core/cm.c
25527 --- linux-3.0.4/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
25528 +++ linux-3.0.4/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
25529 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
25530
25531 struct cm_counter_group {
25532 struct kobject obj;
25533 - atomic_long_t counter[CM_ATTR_COUNT];
25534 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25535 };
25536
25537 struct cm_counter_attribute {
25538 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25539 struct ib_mad_send_buf *msg = NULL;
25540 int ret;
25541
25542 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25543 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25544 counter[CM_REQ_COUNTER]);
25545
25546 /* Quick state check to discard duplicate REQs. */
25547 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25548 if (!cm_id_priv)
25549 return;
25550
25551 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25552 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25553 counter[CM_REP_COUNTER]);
25554 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25555 if (ret)
25556 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25557 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25558 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25559 spin_unlock_irq(&cm_id_priv->lock);
25560 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25561 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25562 counter[CM_RTU_COUNTER]);
25563 goto out;
25564 }
25565 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25566 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25567 dreq_msg->local_comm_id);
25568 if (!cm_id_priv) {
25569 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25570 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25571 counter[CM_DREQ_COUNTER]);
25572 cm_issue_drep(work->port, work->mad_recv_wc);
25573 return -EINVAL;
25574 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25575 case IB_CM_MRA_REP_RCVD:
25576 break;
25577 case IB_CM_TIMEWAIT:
25578 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25579 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25580 counter[CM_DREQ_COUNTER]);
25581 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25582 goto unlock;
25583 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25584 cm_free_msg(msg);
25585 goto deref;
25586 case IB_CM_DREQ_RCVD:
25587 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25588 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25589 counter[CM_DREQ_COUNTER]);
25590 goto unlock;
25591 default:
25592 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25593 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25594 cm_id_priv->msg, timeout)) {
25595 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25596 - atomic_long_inc(&work->port->
25597 + atomic_long_inc_unchecked(&work->port->
25598 counter_group[CM_RECV_DUPLICATES].
25599 counter[CM_MRA_COUNTER]);
25600 goto out;
25601 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25602 break;
25603 case IB_CM_MRA_REQ_RCVD:
25604 case IB_CM_MRA_REP_RCVD:
25605 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25606 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25607 counter[CM_MRA_COUNTER]);
25608 /* fall through */
25609 default:
25610 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25611 case IB_CM_LAP_IDLE:
25612 break;
25613 case IB_CM_MRA_LAP_SENT:
25614 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25615 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25616 counter[CM_LAP_COUNTER]);
25617 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25618 goto unlock;
25619 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25620 cm_free_msg(msg);
25621 goto deref;
25622 case IB_CM_LAP_RCVD:
25623 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25624 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25625 counter[CM_LAP_COUNTER]);
25626 goto unlock;
25627 default:
25628 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25629 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25630 if (cur_cm_id_priv) {
25631 spin_unlock_irq(&cm.lock);
25632 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25633 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25634 counter[CM_SIDR_REQ_COUNTER]);
25635 goto out; /* Duplicate message. */
25636 }
25637 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25638 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25639 msg->retries = 1;
25640
25641 - atomic_long_add(1 + msg->retries,
25642 + atomic_long_add_unchecked(1 + msg->retries,
25643 &port->counter_group[CM_XMIT].counter[attr_index]);
25644 if (msg->retries)
25645 - atomic_long_add(msg->retries,
25646 + atomic_long_add_unchecked(msg->retries,
25647 &port->counter_group[CM_XMIT_RETRIES].
25648 counter[attr_index]);
25649
25650 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25651 }
25652
25653 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25654 - atomic_long_inc(&port->counter_group[CM_RECV].
25655 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25656 counter[attr_id - CM_ATTR_ID_OFFSET]);
25657
25658 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25659 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25660 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25661
25662 return sprintf(buf, "%ld\n",
25663 - atomic_long_read(&group->counter[cm_attr->index]));
25664 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25665 }
25666
25667 static const struct sysfs_ops cm_counter_ops = {
25668 diff -urNp linux-3.0.4/drivers/infiniband/core/fmr_pool.c linux-3.0.4/drivers/infiniband/core/fmr_pool.c
25669 --- linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
25670 +++ linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
25671 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
25672
25673 struct task_struct *thread;
25674
25675 - atomic_t req_ser;
25676 - atomic_t flush_ser;
25677 + atomic_unchecked_t req_ser;
25678 + atomic_unchecked_t flush_ser;
25679
25680 wait_queue_head_t force_wait;
25681 };
25682 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25683 struct ib_fmr_pool *pool = pool_ptr;
25684
25685 do {
25686 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25687 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25688 ib_fmr_batch_release(pool);
25689
25690 - atomic_inc(&pool->flush_ser);
25691 + atomic_inc_unchecked(&pool->flush_ser);
25692 wake_up_interruptible(&pool->force_wait);
25693
25694 if (pool->flush_function)
25695 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25696 }
25697
25698 set_current_state(TASK_INTERRUPTIBLE);
25699 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25700 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25701 !kthread_should_stop())
25702 schedule();
25703 __set_current_state(TASK_RUNNING);
25704 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25705 pool->dirty_watermark = params->dirty_watermark;
25706 pool->dirty_len = 0;
25707 spin_lock_init(&pool->pool_lock);
25708 - atomic_set(&pool->req_ser, 0);
25709 - atomic_set(&pool->flush_ser, 0);
25710 + atomic_set_unchecked(&pool->req_ser, 0);
25711 + atomic_set_unchecked(&pool->flush_ser, 0);
25712 init_waitqueue_head(&pool->force_wait);
25713
25714 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25715 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25716 }
25717 spin_unlock_irq(&pool->pool_lock);
25718
25719 - serial = atomic_inc_return(&pool->req_ser);
25720 + serial = atomic_inc_return_unchecked(&pool->req_ser);
25721 wake_up_process(pool->thread);
25722
25723 if (wait_event_interruptible(pool->force_wait,
25724 - atomic_read(&pool->flush_ser) - serial >= 0))
25725 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25726 return -EINTR;
25727
25728 return 0;
25729 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25730 } else {
25731 list_add_tail(&fmr->list, &pool->dirty_list);
25732 if (++pool->dirty_len >= pool->dirty_watermark) {
25733 - atomic_inc(&pool->req_ser);
25734 + atomic_inc_unchecked(&pool->req_ser);
25735 wake_up_process(pool->thread);
25736 }
25737 }
25738 diff -urNp linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c
25739 --- linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
25740 +++ linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
25741 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25742 int err;
25743 struct fw_ri_tpte tpt;
25744 u32 stag_idx;
25745 - static atomic_t key;
25746 + static atomic_unchecked_t key;
25747
25748 if (c4iw_fatal_error(rdev))
25749 return -EIO;
25750 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25751 &rdev->resource.tpt_fifo_lock);
25752 if (!stag_idx)
25753 return -ENOMEM;
25754 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25755 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25756 }
25757 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25758 __func__, stag_state, type, pdid, stag_idx);
25759 diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c
25760 --- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
25761 +++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
25762 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25763 struct infinipath_counters counters;
25764 struct ipath_devdata *dd;
25765
25766 + pax_track_stack();
25767 +
25768 dd = file->f_path.dentry->d_inode->i_private;
25769 dd->ipath_f_read_counters(dd, &counters);
25770
25771 diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c
25772 --- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
25773 +++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
25774 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25775 struct ib_atomic_eth *ateth;
25776 struct ipath_ack_entry *e;
25777 u64 vaddr;
25778 - atomic64_t *maddr;
25779 + atomic64_unchecked_t *maddr;
25780 u64 sdata;
25781 u32 rkey;
25782 u8 next;
25783 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25784 IB_ACCESS_REMOTE_ATOMIC)))
25785 goto nack_acc_unlck;
25786 /* Perform atomic OP and save result. */
25787 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25788 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25789 sdata = be64_to_cpu(ateth->swap_data);
25790 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25791 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25792 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25793 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25794 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25795 be64_to_cpu(ateth->compare_data),
25796 sdata);
25797 diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c
25798 --- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
25799 +++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
25800 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25801 unsigned long flags;
25802 struct ib_wc wc;
25803 u64 sdata;
25804 - atomic64_t *maddr;
25805 + atomic64_unchecked_t *maddr;
25806 enum ib_wc_status send_status;
25807
25808 /*
25809 @@ -382,11 +382,11 @@ again:
25810 IB_ACCESS_REMOTE_ATOMIC)))
25811 goto acc_err;
25812 /* Perform atomic OP and save result. */
25813 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25814 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25815 sdata = wqe->wr.wr.atomic.compare_add;
25816 *(u64 *) sqp->s_sge.sge.vaddr =
25817 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25818 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25819 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25820 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25821 sdata, wqe->wr.wr.atomic.swap);
25822 goto send_comp;
25823 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.c linux-3.0.4/drivers/infiniband/hw/nes/nes.c
25824 --- linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
25825 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
25826 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25827 LIST_HEAD(nes_adapter_list);
25828 static LIST_HEAD(nes_dev_list);
25829
25830 -atomic_t qps_destroyed;
25831 +atomic_unchecked_t qps_destroyed;
25832
25833 static unsigned int ee_flsh_adapter;
25834 static unsigned int sysfs_nonidx_addr;
25835 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25836 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25837 struct nes_adapter *nesadapter = nesdev->nesadapter;
25838
25839 - atomic_inc(&qps_destroyed);
25840 + atomic_inc_unchecked(&qps_destroyed);
25841
25842 /* Free the control structures */
25843
25844 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c
25845 --- linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
25846 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
25847 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25848 u32 cm_packets_retrans;
25849 u32 cm_packets_created;
25850 u32 cm_packets_received;
25851 -atomic_t cm_listens_created;
25852 -atomic_t cm_listens_destroyed;
25853 +atomic_unchecked_t cm_listens_created;
25854 +atomic_unchecked_t cm_listens_destroyed;
25855 u32 cm_backlog_drops;
25856 -atomic_t cm_loopbacks;
25857 -atomic_t cm_nodes_created;
25858 -atomic_t cm_nodes_destroyed;
25859 -atomic_t cm_accel_dropped_pkts;
25860 -atomic_t cm_resets_recvd;
25861 +atomic_unchecked_t cm_loopbacks;
25862 +atomic_unchecked_t cm_nodes_created;
25863 +atomic_unchecked_t cm_nodes_destroyed;
25864 +atomic_unchecked_t cm_accel_dropped_pkts;
25865 +atomic_unchecked_t cm_resets_recvd;
25866
25867 static inline int mini_cm_accelerated(struct nes_cm_core *,
25868 struct nes_cm_node *);
25869 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25870
25871 static struct nes_cm_core *g_cm_core;
25872
25873 -atomic_t cm_connects;
25874 -atomic_t cm_accepts;
25875 -atomic_t cm_disconnects;
25876 -atomic_t cm_closes;
25877 -atomic_t cm_connecteds;
25878 -atomic_t cm_connect_reqs;
25879 -atomic_t cm_rejects;
25880 +atomic_unchecked_t cm_connects;
25881 +atomic_unchecked_t cm_accepts;
25882 +atomic_unchecked_t cm_disconnects;
25883 +atomic_unchecked_t cm_closes;
25884 +atomic_unchecked_t cm_connecteds;
25885 +atomic_unchecked_t cm_connect_reqs;
25886 +atomic_unchecked_t cm_rejects;
25887
25888
25889 /**
25890 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25891 kfree(listener);
25892 listener = NULL;
25893 ret = 0;
25894 - atomic_inc(&cm_listens_destroyed);
25895 + atomic_inc_unchecked(&cm_listens_destroyed);
25896 } else {
25897 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25898 }
25899 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25900 cm_node->rem_mac);
25901
25902 add_hte_node(cm_core, cm_node);
25903 - atomic_inc(&cm_nodes_created);
25904 + atomic_inc_unchecked(&cm_nodes_created);
25905
25906 return cm_node;
25907 }
25908 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25909 }
25910
25911 atomic_dec(&cm_core->node_cnt);
25912 - atomic_inc(&cm_nodes_destroyed);
25913 + atomic_inc_unchecked(&cm_nodes_destroyed);
25914 nesqp = cm_node->nesqp;
25915 if (nesqp) {
25916 nesqp->cm_node = NULL;
25917 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25918
25919 static void drop_packet(struct sk_buff *skb)
25920 {
25921 - atomic_inc(&cm_accel_dropped_pkts);
25922 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
25923 dev_kfree_skb_any(skb);
25924 }
25925
25926 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25927 {
25928
25929 int reset = 0; /* whether to send reset in case of err.. */
25930 - atomic_inc(&cm_resets_recvd);
25931 + atomic_inc_unchecked(&cm_resets_recvd);
25932 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25933 " refcnt=%d\n", cm_node, cm_node->state,
25934 atomic_read(&cm_node->ref_count));
25935 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25936 rem_ref_cm_node(cm_node->cm_core, cm_node);
25937 return NULL;
25938 }
25939 - atomic_inc(&cm_loopbacks);
25940 + atomic_inc_unchecked(&cm_loopbacks);
25941 loopbackremotenode->loopbackpartner = cm_node;
25942 loopbackremotenode->tcp_cntxt.rcv_wscale =
25943 NES_CM_DEFAULT_RCV_WND_SCALE;
25944 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
25945 add_ref_cm_node(cm_node);
25946 } else if (cm_node->state == NES_CM_STATE_TSA) {
25947 rem_ref_cm_node(cm_core, cm_node);
25948 - atomic_inc(&cm_accel_dropped_pkts);
25949 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
25950 dev_kfree_skb_any(skb);
25951 break;
25952 }
25953 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
25954
25955 if ((cm_id) && (cm_id->event_handler)) {
25956 if (issue_disconn) {
25957 - atomic_inc(&cm_disconnects);
25958 + atomic_inc_unchecked(&cm_disconnects);
25959 cm_event.event = IW_CM_EVENT_DISCONNECT;
25960 cm_event.status = disconn_status;
25961 cm_event.local_addr = cm_id->local_addr;
25962 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
25963 }
25964
25965 if (issue_close) {
25966 - atomic_inc(&cm_closes);
25967 + atomic_inc_unchecked(&cm_closes);
25968 nes_disconnect(nesqp, 1);
25969
25970 cm_id->provider_data = nesqp;
25971 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
25972
25973 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
25974 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
25975 - atomic_inc(&cm_accepts);
25976 + atomic_inc_unchecked(&cm_accepts);
25977
25978 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
25979 netdev_refcnt_read(nesvnic->netdev));
25980 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
25981
25982 struct nes_cm_core *cm_core;
25983
25984 - atomic_inc(&cm_rejects);
25985 + atomic_inc_unchecked(&cm_rejects);
25986 cm_node = (struct nes_cm_node *) cm_id->provider_data;
25987 loopback = cm_node->loopbackpartner;
25988 cm_core = cm_node->cm_core;
25989 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
25990 ntohl(cm_id->local_addr.sin_addr.s_addr),
25991 ntohs(cm_id->local_addr.sin_port));
25992
25993 - atomic_inc(&cm_connects);
25994 + atomic_inc_unchecked(&cm_connects);
25995 nesqp->active_conn = 1;
25996
25997 /* cache the cm_id in the qp */
25998 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
25999 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26000 return err;
26001 }
26002 - atomic_inc(&cm_listens_created);
26003 + atomic_inc_unchecked(&cm_listens_created);
26004 }
26005
26006 cm_id->add_ref(cm_id);
26007 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26008 if (nesqp->destroyed) {
26009 return;
26010 }
26011 - atomic_inc(&cm_connecteds);
26012 + atomic_inc_unchecked(&cm_connecteds);
26013 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26014 " local port 0x%04X. jiffies = %lu.\n",
26015 nesqp->hwqp.qp_id,
26016 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26017
26018 cm_id->add_ref(cm_id);
26019 ret = cm_id->event_handler(cm_id, &cm_event);
26020 - atomic_inc(&cm_closes);
26021 + atomic_inc_unchecked(&cm_closes);
26022 cm_event.event = IW_CM_EVENT_CLOSE;
26023 cm_event.status = 0;
26024 cm_event.provider_data = cm_id->provider_data;
26025 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26026 return;
26027 cm_id = cm_node->cm_id;
26028
26029 - atomic_inc(&cm_connect_reqs);
26030 + atomic_inc_unchecked(&cm_connect_reqs);
26031 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26032 cm_node, cm_id, jiffies);
26033
26034 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26035 return;
26036 cm_id = cm_node->cm_id;
26037
26038 - atomic_inc(&cm_connect_reqs);
26039 + atomic_inc_unchecked(&cm_connect_reqs);
26040 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26041 cm_node, cm_id, jiffies);
26042
26043 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.h linux-3.0.4/drivers/infiniband/hw/nes/nes.h
26044 --- linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
26045 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
26046 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26047 extern unsigned int wqm_quanta;
26048 extern struct list_head nes_adapter_list;
26049
26050 -extern atomic_t cm_connects;
26051 -extern atomic_t cm_accepts;
26052 -extern atomic_t cm_disconnects;
26053 -extern atomic_t cm_closes;
26054 -extern atomic_t cm_connecteds;
26055 -extern atomic_t cm_connect_reqs;
26056 -extern atomic_t cm_rejects;
26057 -extern atomic_t mod_qp_timouts;
26058 -extern atomic_t qps_created;
26059 -extern atomic_t qps_destroyed;
26060 -extern atomic_t sw_qps_destroyed;
26061 +extern atomic_unchecked_t cm_connects;
26062 +extern atomic_unchecked_t cm_accepts;
26063 +extern atomic_unchecked_t cm_disconnects;
26064 +extern atomic_unchecked_t cm_closes;
26065 +extern atomic_unchecked_t cm_connecteds;
26066 +extern atomic_unchecked_t cm_connect_reqs;
26067 +extern atomic_unchecked_t cm_rejects;
26068 +extern atomic_unchecked_t mod_qp_timouts;
26069 +extern atomic_unchecked_t qps_created;
26070 +extern atomic_unchecked_t qps_destroyed;
26071 +extern atomic_unchecked_t sw_qps_destroyed;
26072 extern u32 mh_detected;
26073 extern u32 mh_pauses_sent;
26074 extern u32 cm_packets_sent;
26075 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26076 extern u32 cm_packets_received;
26077 extern u32 cm_packets_dropped;
26078 extern u32 cm_packets_retrans;
26079 -extern atomic_t cm_listens_created;
26080 -extern atomic_t cm_listens_destroyed;
26081 +extern atomic_unchecked_t cm_listens_created;
26082 +extern atomic_unchecked_t cm_listens_destroyed;
26083 extern u32 cm_backlog_drops;
26084 -extern atomic_t cm_loopbacks;
26085 -extern atomic_t cm_nodes_created;
26086 -extern atomic_t cm_nodes_destroyed;
26087 -extern atomic_t cm_accel_dropped_pkts;
26088 -extern atomic_t cm_resets_recvd;
26089 +extern atomic_unchecked_t cm_loopbacks;
26090 +extern atomic_unchecked_t cm_nodes_created;
26091 +extern atomic_unchecked_t cm_nodes_destroyed;
26092 +extern atomic_unchecked_t cm_accel_dropped_pkts;
26093 +extern atomic_unchecked_t cm_resets_recvd;
26094
26095 extern u32 int_mod_timer_init;
26096 extern u32 int_mod_cq_depth_256;
26097 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c
26098 --- linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
26099 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
26100 @@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
26101 target_stat_values[++index] = mh_detected;
26102 target_stat_values[++index] = mh_pauses_sent;
26103 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26104 - target_stat_values[++index] = atomic_read(&cm_connects);
26105 - target_stat_values[++index] = atomic_read(&cm_accepts);
26106 - target_stat_values[++index] = atomic_read(&cm_disconnects);
26107 - target_stat_values[++index] = atomic_read(&cm_connecteds);
26108 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26109 - target_stat_values[++index] = atomic_read(&cm_rejects);
26110 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26111 - target_stat_values[++index] = atomic_read(&qps_created);
26112 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26113 - target_stat_values[++index] = atomic_read(&qps_destroyed);
26114 - target_stat_values[++index] = atomic_read(&cm_closes);
26115 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26116 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26117 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26118 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26119 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26120 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26121 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26122 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26123 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26124 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26125 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26126 target_stat_values[++index] = cm_packets_sent;
26127 target_stat_values[++index] = cm_packets_bounced;
26128 target_stat_values[++index] = cm_packets_created;
26129 target_stat_values[++index] = cm_packets_received;
26130 target_stat_values[++index] = cm_packets_dropped;
26131 target_stat_values[++index] = cm_packets_retrans;
26132 - target_stat_values[++index] = atomic_read(&cm_listens_created);
26133 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26134 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26135 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26136 target_stat_values[++index] = cm_backlog_drops;
26137 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
26138 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
26139 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26140 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26141 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26142 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26143 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26144 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26145 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26146 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26147 target_stat_values[++index] = nesadapter->free_4kpbl;
26148 target_stat_values[++index] = nesadapter->free_256pbl;
26149 target_stat_values[++index] = int_mod_timer_init;
26150 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c
26151 --- linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
26152 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
26153 @@ -46,9 +46,9 @@
26154
26155 #include <rdma/ib_umem.h>
26156
26157 -atomic_t mod_qp_timouts;
26158 -atomic_t qps_created;
26159 -atomic_t sw_qps_destroyed;
26160 +atomic_unchecked_t mod_qp_timouts;
26161 +atomic_unchecked_t qps_created;
26162 +atomic_unchecked_t sw_qps_destroyed;
26163
26164 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26165
26166 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26167 if (init_attr->create_flags)
26168 return ERR_PTR(-EINVAL);
26169
26170 - atomic_inc(&qps_created);
26171 + atomic_inc_unchecked(&qps_created);
26172 switch (init_attr->qp_type) {
26173 case IB_QPT_RC:
26174 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26175 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26176 struct iw_cm_event cm_event;
26177 int ret;
26178
26179 - atomic_inc(&sw_qps_destroyed);
26180 + atomic_inc_unchecked(&sw_qps_destroyed);
26181 nesqp->destroyed = 1;
26182
26183 /* Blow away the connection if it exists. */
26184 diff -urNp linux-3.0.4/drivers/infiniband/hw/qib/qib.h linux-3.0.4/drivers/infiniband/hw/qib/qib.h
26185 --- linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
26186 +++ linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
26187 @@ -51,6 +51,7 @@
26188 #include <linux/completion.h>
26189 #include <linux/kref.h>
26190 #include <linux/sched.h>
26191 +#include <linux/slab.h>
26192
26193 #include "qib_common.h"
26194 #include "qib_verbs.h"
26195 diff -urNp linux-3.0.4/drivers/input/gameport/gameport.c linux-3.0.4/drivers/input/gameport/gameport.c
26196 --- linux-3.0.4/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
26197 +++ linux-3.0.4/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
26198 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26199 */
26200 static void gameport_init_port(struct gameport *gameport)
26201 {
26202 - static atomic_t gameport_no = ATOMIC_INIT(0);
26203 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26204
26205 __module_get(THIS_MODULE);
26206
26207 mutex_init(&gameport->drv_mutex);
26208 device_initialize(&gameport->dev);
26209 dev_set_name(&gameport->dev, "gameport%lu",
26210 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
26211 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26212 gameport->dev.bus = &gameport_bus;
26213 gameport->dev.release = gameport_release_port;
26214 if (gameport->parent)
26215 diff -urNp linux-3.0.4/drivers/input/input.c linux-3.0.4/drivers/input/input.c
26216 --- linux-3.0.4/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
26217 +++ linux-3.0.4/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
26218 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
26219 */
26220 int input_register_device(struct input_dev *dev)
26221 {
26222 - static atomic_t input_no = ATOMIC_INIT(0);
26223 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26224 struct input_handler *handler;
26225 const char *path;
26226 int error;
26227 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
26228 dev->setkeycode = input_default_setkeycode;
26229
26230 dev_set_name(&dev->dev, "input%ld",
26231 - (unsigned long) atomic_inc_return(&input_no) - 1);
26232 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26233
26234 error = device_add(&dev->dev);
26235 if (error)
26236 diff -urNp linux-3.0.4/drivers/input/joystick/sidewinder.c linux-3.0.4/drivers/input/joystick/sidewinder.c
26237 --- linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
26238 +++ linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
26239 @@ -30,6 +30,7 @@
26240 #include <linux/kernel.h>
26241 #include <linux/module.h>
26242 #include <linux/slab.h>
26243 +#include <linux/sched.h>
26244 #include <linux/init.h>
26245 #include <linux/input.h>
26246 #include <linux/gameport.h>
26247 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26248 unsigned char buf[SW_LENGTH];
26249 int i;
26250
26251 + pax_track_stack();
26252 +
26253 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26254
26255 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26256 diff -urNp linux-3.0.4/drivers/input/joystick/xpad.c linux-3.0.4/drivers/input/joystick/xpad.c
26257 --- linux-3.0.4/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
26258 +++ linux-3.0.4/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
26259 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26260
26261 static int xpad_led_probe(struct usb_xpad *xpad)
26262 {
26263 - static atomic_t led_seq = ATOMIC_INIT(0);
26264 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26265 long led_no;
26266 struct xpad_led *led;
26267 struct led_classdev *led_cdev;
26268 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26269 if (!led)
26270 return -ENOMEM;
26271
26272 - led_no = (long)atomic_inc_return(&led_seq) - 1;
26273 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26274
26275 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26276 led->xpad = xpad;
26277 diff -urNp linux-3.0.4/drivers/input/mousedev.c linux-3.0.4/drivers/input/mousedev.c
26278 --- linux-3.0.4/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
26279 +++ linux-3.0.4/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
26280 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
26281
26282 spin_unlock_irq(&client->packet_lock);
26283
26284 - if (copy_to_user(buffer, data, count))
26285 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
26286 return -EFAULT;
26287
26288 return count;
26289 diff -urNp linux-3.0.4/drivers/input/serio/serio.c linux-3.0.4/drivers/input/serio/serio.c
26290 --- linux-3.0.4/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
26291 +++ linux-3.0.4/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
26292 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
26293 */
26294 static void serio_init_port(struct serio *serio)
26295 {
26296 - static atomic_t serio_no = ATOMIC_INIT(0);
26297 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26298
26299 __module_get(THIS_MODULE);
26300
26301 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26302 mutex_init(&serio->drv_mutex);
26303 device_initialize(&serio->dev);
26304 dev_set_name(&serio->dev, "serio%ld",
26305 - (long)atomic_inc_return(&serio_no) - 1);
26306 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
26307 serio->dev.bus = &serio_bus;
26308 serio->dev.release = serio_release_port;
26309 serio->dev.groups = serio_device_attr_groups;
26310 diff -urNp linux-3.0.4/drivers/isdn/capi/capi.c linux-3.0.4/drivers/isdn/capi/capi.c
26311 --- linux-3.0.4/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
26312 +++ linux-3.0.4/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
26313 @@ -83,8 +83,8 @@ struct capiminor {
26314
26315 struct capi20_appl *ap;
26316 u32 ncci;
26317 - atomic_t datahandle;
26318 - atomic_t msgid;
26319 + atomic_unchecked_t datahandle;
26320 + atomic_unchecked_t msgid;
26321
26322 struct tty_port port;
26323 int ttyinstop;
26324 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
26325 capimsg_setu16(s, 2, mp->ap->applid);
26326 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26327 capimsg_setu8 (s, 5, CAPI_RESP);
26328 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26329 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26330 capimsg_setu32(s, 8, mp->ncci);
26331 capimsg_setu16(s, 12, datahandle);
26332 }
26333 @@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
26334 mp->outbytes -= len;
26335 spin_unlock_bh(&mp->outlock);
26336
26337 - datahandle = atomic_inc_return(&mp->datahandle);
26338 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26339 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26340 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26341 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26342 capimsg_setu16(skb->data, 2, mp->ap->applid);
26343 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26344 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26345 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26346 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26347 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26348 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26349 capimsg_setu16(skb->data, 16, len); /* Data length */
26350 diff -urNp linux-3.0.4/drivers/isdn/gigaset/common.c linux-3.0.4/drivers/isdn/gigaset/common.c
26351 --- linux-3.0.4/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
26352 +++ linux-3.0.4/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
26353 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26354 cs->commands_pending = 0;
26355 cs->cur_at_seq = 0;
26356 cs->gotfwver = -1;
26357 - cs->open_count = 0;
26358 + local_set(&cs->open_count, 0);
26359 cs->dev = NULL;
26360 cs->tty = NULL;
26361 cs->tty_dev = NULL;
26362 diff -urNp linux-3.0.4/drivers/isdn/gigaset/gigaset.h linux-3.0.4/drivers/isdn/gigaset/gigaset.h
26363 --- linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
26364 +++ linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
26365 @@ -35,6 +35,7 @@
26366 #include <linux/tty_driver.h>
26367 #include <linux/list.h>
26368 #include <asm/atomic.h>
26369 +#include <asm/local.h>
26370
26371 #define GIG_VERSION {0, 5, 0, 0}
26372 #define GIG_COMPAT {0, 4, 0, 0}
26373 @@ -433,7 +434,7 @@ struct cardstate {
26374 spinlock_t cmdlock;
26375 unsigned curlen, cmdbytes;
26376
26377 - unsigned open_count;
26378 + local_t open_count;
26379 struct tty_struct *tty;
26380 struct tasklet_struct if_wake_tasklet;
26381 unsigned control_state;
26382 diff -urNp linux-3.0.4/drivers/isdn/gigaset/interface.c linux-3.0.4/drivers/isdn/gigaset/interface.c
26383 --- linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
26384 +++ linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
26385 @@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
26386 }
26387 tty->driver_data = cs;
26388
26389 - ++cs->open_count;
26390 -
26391 - if (cs->open_count == 1) {
26392 + if (local_inc_return(&cs->open_count) == 1) {
26393 spin_lock_irqsave(&cs->lock, flags);
26394 cs->tty = tty;
26395 spin_unlock_irqrestore(&cs->lock, flags);
26396 @@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
26397
26398 if (!cs->connected)
26399 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26400 - else if (!cs->open_count)
26401 + else if (!local_read(&cs->open_count))
26402 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26403 else {
26404 - if (!--cs->open_count) {
26405 + if (!local_dec_return(&cs->open_count)) {
26406 spin_lock_irqsave(&cs->lock, flags);
26407 cs->tty = NULL;
26408 spin_unlock_irqrestore(&cs->lock, flags);
26409 @@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
26410 if (!cs->connected) {
26411 gig_dbg(DEBUG_IF, "not connected");
26412 retval = -ENODEV;
26413 - } else if (!cs->open_count)
26414 + } else if (!local_read(&cs->open_count))
26415 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26416 else {
26417 retval = 0;
26418 @@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
26419 retval = -ENODEV;
26420 goto done;
26421 }
26422 - if (!cs->open_count) {
26423 + if (!local_read(&cs->open_count)) {
26424 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26425 retval = -ENODEV;
26426 goto done;
26427 @@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
26428 if (!cs->connected) {
26429 gig_dbg(DEBUG_IF, "not connected");
26430 retval = -ENODEV;
26431 - } else if (!cs->open_count)
26432 + } else if (!local_read(&cs->open_count))
26433 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26434 else if (cs->mstate != MS_LOCKED) {
26435 dev_warn(cs->dev, "can't write to unlocked device\n");
26436 @@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
26437
26438 if (!cs->connected)
26439 gig_dbg(DEBUG_IF, "not connected");
26440 - else if (!cs->open_count)
26441 + else if (!local_read(&cs->open_count))
26442 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26443 else if (cs->mstate != MS_LOCKED)
26444 dev_warn(cs->dev, "can't write to unlocked device\n");
26445 @@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
26446
26447 if (!cs->connected)
26448 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26449 - else if (!cs->open_count)
26450 + else if (!local_read(&cs->open_count))
26451 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26452 else
26453 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26454 @@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
26455
26456 if (!cs->connected)
26457 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26458 - else if (!cs->open_count)
26459 + else if (!local_read(&cs->open_count))
26460 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26461 else
26462 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26463 @@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
26464 goto out;
26465 }
26466
26467 - if (!cs->open_count) {
26468 + if (!local_read(&cs->open_count)) {
26469 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26470 goto out;
26471 }
26472 diff -urNp linux-3.0.4/drivers/isdn/hardware/avm/b1.c linux-3.0.4/drivers/isdn/hardware/avm/b1.c
26473 --- linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
26474 +++ linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
26475 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26476 }
26477 if (left) {
26478 if (t4file->user) {
26479 - if (copy_from_user(buf, dp, left))
26480 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26481 return -EFAULT;
26482 } else {
26483 memcpy(buf, dp, left);
26484 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26485 }
26486 if (left) {
26487 if (config->user) {
26488 - if (copy_from_user(buf, dp, left))
26489 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26490 return -EFAULT;
26491 } else {
26492 memcpy(buf, dp, left);
26493 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c
26494 --- linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
26495 +++ linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
26496 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26497 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26498 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26499
26500 + pax_track_stack();
26501
26502 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26503 {
26504 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c
26505 --- linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
26506 +++ linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
26507 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26508 IDI_SYNC_REQ req;
26509 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26510
26511 + pax_track_stack();
26512 +
26513 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26514
26515 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26516 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c
26517 --- linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
26518 +++ linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
26519 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26520 IDI_SYNC_REQ req;
26521 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26522
26523 + pax_track_stack();
26524 +
26525 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26526
26527 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26528 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c
26529 --- linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
26530 +++ linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
26531 @@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
26532 IDI_SYNC_REQ req;
26533 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26534
26535 + pax_track_stack();
26536 +
26537 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26538
26539 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26540 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h
26541 --- linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
26542 +++ linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
26543 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26544 } diva_didd_add_adapter_t;
26545 typedef struct _diva_didd_remove_adapter {
26546 IDI_CALL p_request;
26547 -} diva_didd_remove_adapter_t;
26548 +} __no_const diva_didd_remove_adapter_t;
26549 typedef struct _diva_didd_read_adapter_array {
26550 void * buffer;
26551 dword length;
26552 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c
26553 --- linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
26554 +++ linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
26555 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26556 IDI_SYNC_REQ req;
26557 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26558
26559 + pax_track_stack();
26560 +
26561 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26562
26563 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26564 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/message.c linux-3.0.4/drivers/isdn/hardware/eicon/message.c
26565 --- linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
26566 +++ linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
26567 @@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
26568 dword d;
26569 word w;
26570
26571 + pax_track_stack();
26572 +
26573 a = plci->adapter;
26574 Id = ((word)plci->Id<<8)|a->Id;
26575 PUT_WORD(&SS_Ind[4],0x0000);
26576 @@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
26577 word j, n, w;
26578 dword d;
26579
26580 + pax_track_stack();
26581 +
26582
26583 for(i=0;i<8;i++) bp_parms[i].length = 0;
26584 for(i=0;i<2;i++) global_config[i].length = 0;
26585 @@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
26586 const byte llc3[] = {4,3,2,2,6,6,0};
26587 const byte header[] = {0,2,3,3,0,0,0};
26588
26589 + pax_track_stack();
26590 +
26591 for(i=0;i<8;i++) bp_parms[i].length = 0;
26592 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26593 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26594 @@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
26595 word appl_number_group_type[MAX_APPL];
26596 PLCI *auxplci;
26597
26598 + pax_track_stack();
26599 +
26600 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26601
26602 if(!a->group_optimization_enabled)
26603 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c
26604 --- linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
26605 +++ linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
26606 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26607 IDI_SYNC_REQ req;
26608 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26609
26610 + pax_track_stack();
26611 +
26612 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26613
26614 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26615 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h
26616 --- linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
26617 +++ linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
26618 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26619 typedef struct _diva_os_idi_adapter_interface {
26620 diva_init_card_proc_t cleanup_adapter_proc;
26621 diva_cmd_card_proc_t cmd_proc;
26622 -} diva_os_idi_adapter_interface_t;
26623 +} __no_const diva_os_idi_adapter_interface_t;
26624
26625 typedef struct _diva_os_xdi_adapter {
26626 struct list_head link;
26627 diff -urNp linux-3.0.4/drivers/isdn/i4l/isdn_common.c linux-3.0.4/drivers/isdn/i4l/isdn_common.c
26628 --- linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
26629 +++ linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
26630 @@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
26631 } iocpar;
26632 void __user *argp = (void __user *)arg;
26633
26634 + pax_track_stack();
26635 +
26636 #define name iocpar.name
26637 #define bname iocpar.bname
26638 #define iocts iocpar.iocts
26639 diff -urNp linux-3.0.4/drivers/isdn/icn/icn.c linux-3.0.4/drivers/isdn/icn/icn.c
26640 --- linux-3.0.4/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
26641 +++ linux-3.0.4/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
26642 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26643 if (count > len)
26644 count = len;
26645 if (user) {
26646 - if (copy_from_user(msg, buf, count))
26647 + if (count > sizeof msg || copy_from_user(msg, buf, count))
26648 return -EFAULT;
26649 } else
26650 memcpy(msg, buf, count);
26651 diff -urNp linux-3.0.4/drivers/lguest/core.c linux-3.0.4/drivers/lguest/core.c
26652 --- linux-3.0.4/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
26653 +++ linux-3.0.4/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
26654 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
26655 * it's worked so far. The end address needs +1 because __get_vm_area
26656 * allocates an extra guard page, so we need space for that.
26657 */
26658 +
26659 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26660 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26661 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26662 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26663 +#else
26664 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26665 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26666 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26667 +#endif
26668 +
26669 if (!switcher_vma) {
26670 err = -ENOMEM;
26671 printk("lguest: could not map switcher pages high\n");
26672 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
26673 * Now the Switcher is mapped at the right address, we can't fail!
26674 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26675 */
26676 - memcpy(switcher_vma->addr, start_switcher_text,
26677 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26678 end_switcher_text - start_switcher_text);
26679
26680 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26681 diff -urNp linux-3.0.4/drivers/lguest/x86/core.c linux-3.0.4/drivers/lguest/x86/core.c
26682 --- linux-3.0.4/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
26683 +++ linux-3.0.4/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
26684 @@ -59,7 +59,7 @@ static struct {
26685 /* Offset from where switcher.S was compiled to where we've copied it */
26686 static unsigned long switcher_offset(void)
26687 {
26688 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26689 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26690 }
26691
26692 /* This cpu's struct lguest_pages. */
26693 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26694 * These copies are pretty cheap, so we do them unconditionally: */
26695 /* Save the current Host top-level page directory.
26696 */
26697 +
26698 +#ifdef CONFIG_PAX_PER_CPU_PGD
26699 + pages->state.host_cr3 = read_cr3();
26700 +#else
26701 pages->state.host_cr3 = __pa(current->mm->pgd);
26702 +#endif
26703 +
26704 /*
26705 * Set up the Guest's page tables to see this CPU's pages (and no
26706 * other CPU's pages).
26707 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26708 * compiled-in switcher code and the high-mapped copy we just made.
26709 */
26710 for (i = 0; i < IDT_ENTRIES; i++)
26711 - default_idt_entries[i] += switcher_offset();
26712 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26713
26714 /*
26715 * Set up the Switcher's per-cpu areas.
26716 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26717 * it will be undisturbed when we switch. To change %cs and jump we
26718 * need this structure to feed to Intel's "lcall" instruction.
26719 */
26720 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26721 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26722 lguest_entry.segment = LGUEST_CS;
26723
26724 /*
26725 diff -urNp linux-3.0.4/drivers/lguest/x86/switcher_32.S linux-3.0.4/drivers/lguest/x86/switcher_32.S
26726 --- linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
26727 +++ linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
26728 @@ -87,6 +87,7 @@
26729 #include <asm/page.h>
26730 #include <asm/segment.h>
26731 #include <asm/lguest.h>
26732 +#include <asm/processor-flags.h>
26733
26734 // We mark the start of the code to copy
26735 // It's placed in .text tho it's never run here
26736 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26737 // Changes type when we load it: damn Intel!
26738 // For after we switch over our page tables
26739 // That entry will be read-only: we'd crash.
26740 +
26741 +#ifdef CONFIG_PAX_KERNEXEC
26742 + mov %cr0, %edx
26743 + xor $X86_CR0_WP, %edx
26744 + mov %edx, %cr0
26745 +#endif
26746 +
26747 movl $(GDT_ENTRY_TSS*8), %edx
26748 ltr %dx
26749
26750 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26751 // Let's clear it again for our return.
26752 // The GDT descriptor of the Host
26753 // Points to the table after two "size" bytes
26754 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26755 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26756 // Clear "used" from type field (byte 5, bit 2)
26757 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26758 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26759 +
26760 +#ifdef CONFIG_PAX_KERNEXEC
26761 + mov %cr0, %eax
26762 + xor $X86_CR0_WP, %eax
26763 + mov %eax, %cr0
26764 +#endif
26765
26766 // Once our page table's switched, the Guest is live!
26767 // The Host fades as we run this final step.
26768 @@ -295,13 +309,12 @@ deliver_to_host:
26769 // I consulted gcc, and it gave
26770 // These instructions, which I gladly credit:
26771 leal (%edx,%ebx,8), %eax
26772 - movzwl (%eax),%edx
26773 - movl 4(%eax), %eax
26774 - xorw %ax, %ax
26775 - orl %eax, %edx
26776 + movl 4(%eax), %edx
26777 + movw (%eax), %dx
26778 // Now the address of the handler's in %edx
26779 // We call it now: its "iret" drops us home.
26780 - jmp *%edx
26781 + ljmp $__KERNEL_CS, $1f
26782 +1: jmp *%edx
26783
26784 // Every interrupt can come to us here
26785 // But we must truly tell each apart.
26786 diff -urNp linux-3.0.4/drivers/md/dm.c linux-3.0.4/drivers/md/dm.c
26787 --- linux-3.0.4/drivers/md/dm.c 2011-08-23 21:44:40.000000000 -0400
26788 +++ linux-3.0.4/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
26789 @@ -164,9 +164,9 @@ struct mapped_device {
26790 /*
26791 * Event handling.
26792 */
26793 - atomic_t event_nr;
26794 + atomic_unchecked_t event_nr;
26795 wait_queue_head_t eventq;
26796 - atomic_t uevent_seq;
26797 + atomic_unchecked_t uevent_seq;
26798 struct list_head uevent_list;
26799 spinlock_t uevent_lock; /* Protect access to uevent_list */
26800
26801 @@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
26802 rwlock_init(&md->map_lock);
26803 atomic_set(&md->holders, 1);
26804 atomic_set(&md->open_count, 0);
26805 - atomic_set(&md->event_nr, 0);
26806 - atomic_set(&md->uevent_seq, 0);
26807 + atomic_set_unchecked(&md->event_nr, 0);
26808 + atomic_set_unchecked(&md->uevent_seq, 0);
26809 INIT_LIST_HEAD(&md->uevent_list);
26810 spin_lock_init(&md->uevent_lock);
26811
26812 @@ -1977,7 +1977,7 @@ static void event_callback(void *context
26813
26814 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26815
26816 - atomic_inc(&md->event_nr);
26817 + atomic_inc_unchecked(&md->event_nr);
26818 wake_up(&md->eventq);
26819 }
26820
26821 @@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
26822
26823 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26824 {
26825 - return atomic_add_return(1, &md->uevent_seq);
26826 + return atomic_add_return_unchecked(1, &md->uevent_seq);
26827 }
26828
26829 uint32_t dm_get_event_nr(struct mapped_device *md)
26830 {
26831 - return atomic_read(&md->event_nr);
26832 + return atomic_read_unchecked(&md->event_nr);
26833 }
26834
26835 int dm_wait_event(struct mapped_device *md, int event_nr)
26836 {
26837 return wait_event_interruptible(md->eventq,
26838 - (event_nr != atomic_read(&md->event_nr)));
26839 + (event_nr != atomic_read_unchecked(&md->event_nr)));
26840 }
26841
26842 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26843 diff -urNp linux-3.0.4/drivers/md/dm-ioctl.c linux-3.0.4/drivers/md/dm-ioctl.c
26844 --- linux-3.0.4/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
26845 +++ linux-3.0.4/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
26846 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26847 cmd == DM_LIST_VERSIONS_CMD)
26848 return 0;
26849
26850 - if ((cmd == DM_DEV_CREATE_CMD)) {
26851 + if (cmd == DM_DEV_CREATE_CMD) {
26852 if (!*param->name) {
26853 DMWARN("name not supplied when creating device");
26854 return -EINVAL;
26855 diff -urNp linux-3.0.4/drivers/md/dm-raid1.c linux-3.0.4/drivers/md/dm-raid1.c
26856 --- linux-3.0.4/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
26857 +++ linux-3.0.4/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
26858 @@ -40,7 +40,7 @@ enum dm_raid1_error {
26859
26860 struct mirror {
26861 struct mirror_set *ms;
26862 - atomic_t error_count;
26863 + atomic_unchecked_t error_count;
26864 unsigned long error_type;
26865 struct dm_dev *dev;
26866 sector_t offset;
26867 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
26868 struct mirror *m;
26869
26870 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26871 - if (!atomic_read(&m->error_count))
26872 + if (!atomic_read_unchecked(&m->error_count))
26873 return m;
26874
26875 return NULL;
26876 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
26877 * simple way to tell if a device has encountered
26878 * errors.
26879 */
26880 - atomic_inc(&m->error_count);
26881 + atomic_inc_unchecked(&m->error_count);
26882
26883 if (test_and_set_bit(error_type, &m->error_type))
26884 return;
26885 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
26886 struct mirror *m = get_default_mirror(ms);
26887
26888 do {
26889 - if (likely(!atomic_read(&m->error_count)))
26890 + if (likely(!atomic_read_unchecked(&m->error_count)))
26891 return m;
26892
26893 if (m-- == ms->mirror)
26894 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
26895 {
26896 struct mirror *default_mirror = get_default_mirror(m->ms);
26897
26898 - return !atomic_read(&default_mirror->error_count);
26899 + return !atomic_read_unchecked(&default_mirror->error_count);
26900 }
26901
26902 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26903 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
26904 */
26905 if (likely(region_in_sync(ms, region, 1)))
26906 m = choose_mirror(ms, bio->bi_sector);
26907 - else if (m && atomic_read(&m->error_count))
26908 + else if (m && atomic_read_unchecked(&m->error_count))
26909 m = NULL;
26910
26911 if (likely(m))
26912 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
26913 }
26914
26915 ms->mirror[mirror].ms = ms;
26916 - atomic_set(&(ms->mirror[mirror].error_count), 0);
26917 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26918 ms->mirror[mirror].error_type = 0;
26919 ms->mirror[mirror].offset = offset;
26920
26921 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26922 */
26923 static char device_status_char(struct mirror *m)
26924 {
26925 - if (!atomic_read(&(m->error_count)))
26926 + if (!atomic_read_unchecked(&(m->error_count)))
26927 return 'A';
26928
26929 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26930 diff -urNp linux-3.0.4/drivers/md/dm-stripe.c linux-3.0.4/drivers/md/dm-stripe.c
26931 --- linux-3.0.4/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
26932 +++ linux-3.0.4/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
26933 @@ -20,7 +20,7 @@ struct stripe {
26934 struct dm_dev *dev;
26935 sector_t physical_start;
26936
26937 - atomic_t error_count;
26938 + atomic_unchecked_t error_count;
26939 };
26940
26941 struct stripe_c {
26942 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
26943 kfree(sc);
26944 return r;
26945 }
26946 - atomic_set(&(sc->stripe[i].error_count), 0);
26947 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
26948 }
26949
26950 ti->private = sc;
26951 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
26952 DMEMIT("%d ", sc->stripes);
26953 for (i = 0; i < sc->stripes; i++) {
26954 DMEMIT("%s ", sc->stripe[i].dev->name);
26955 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
26956 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
26957 'D' : 'A';
26958 }
26959 buffer[i] = '\0';
26960 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
26961 */
26962 for (i = 0; i < sc->stripes; i++)
26963 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
26964 - atomic_inc(&(sc->stripe[i].error_count));
26965 - if (atomic_read(&(sc->stripe[i].error_count)) <
26966 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
26967 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
26968 DM_IO_ERROR_THRESHOLD)
26969 schedule_work(&sc->trigger_event);
26970 }
26971 diff -urNp linux-3.0.4/drivers/md/dm-table.c linux-3.0.4/drivers/md/dm-table.c
26972 --- linux-3.0.4/drivers/md/dm-table.c 2011-07-21 22:17:23.000000000 -0400
26973 +++ linux-3.0.4/drivers/md/dm-table.c 2011-08-23 21:47:55.000000000 -0400
26974 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
26975 if (!dev_size)
26976 return 0;
26977
26978 - if ((start >= dev_size) || (start + len > dev_size)) {
26979 + if ((start >= dev_size) || (len > dev_size - start)) {
26980 DMWARN("%s: %s too small for target: "
26981 "start=%llu, len=%llu, dev_size=%llu",
26982 dm_device_name(ti->table->md), bdevname(bdev, b),
26983 diff -urNp linux-3.0.4/drivers/md/md.c linux-3.0.4/drivers/md/md.c
26984 --- linux-3.0.4/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
26985 +++ linux-3.0.4/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
26986 @@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
26987 * start build, activate spare
26988 */
26989 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
26990 -static atomic_t md_event_count;
26991 +static atomic_unchecked_t md_event_count;
26992 void md_new_event(mddev_t *mddev)
26993 {
26994 - atomic_inc(&md_event_count);
26995 + atomic_inc_unchecked(&md_event_count);
26996 wake_up(&md_event_waiters);
26997 }
26998 EXPORT_SYMBOL_GPL(md_new_event);
26999 @@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27000 */
27001 static void md_new_event_inintr(mddev_t *mddev)
27002 {
27003 - atomic_inc(&md_event_count);
27004 + atomic_inc_unchecked(&md_event_count);
27005 wake_up(&md_event_waiters);
27006 }
27007
27008 @@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
27009
27010 rdev->preferred_minor = 0xffff;
27011 rdev->data_offset = le64_to_cpu(sb->data_offset);
27012 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27013 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27014
27015 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27016 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27017 @@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
27018 else
27019 sb->resync_offset = cpu_to_le64(0);
27020
27021 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27022 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27023
27024 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27025 sb->size = cpu_to_le64(mddev->dev_sectors);
27026 @@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27027 static ssize_t
27028 errors_show(mdk_rdev_t *rdev, char *page)
27029 {
27030 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27031 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27032 }
27033
27034 static ssize_t
27035 @@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27036 char *e;
27037 unsigned long n = simple_strtoul(buf, &e, 10);
27038 if (*buf && (*e == 0 || *e == '\n')) {
27039 - atomic_set(&rdev->corrected_errors, n);
27040 + atomic_set_unchecked(&rdev->corrected_errors, n);
27041 return len;
27042 }
27043 return -EINVAL;
27044 @@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27045 rdev->last_read_error.tv_sec = 0;
27046 rdev->last_read_error.tv_nsec = 0;
27047 atomic_set(&rdev->nr_pending, 0);
27048 - atomic_set(&rdev->read_errors, 0);
27049 - atomic_set(&rdev->corrected_errors, 0);
27050 + atomic_set_unchecked(&rdev->read_errors, 0);
27051 + atomic_set_unchecked(&rdev->corrected_errors, 0);
27052
27053 INIT_LIST_HEAD(&rdev->same_set);
27054 init_waitqueue_head(&rdev->blocked_wait);
27055 @@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
27056
27057 spin_unlock(&pers_lock);
27058 seq_printf(seq, "\n");
27059 - mi->event = atomic_read(&md_event_count);
27060 + mi->event = atomic_read_unchecked(&md_event_count);
27061 return 0;
27062 }
27063 if (v == (void*)2) {
27064 @@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
27065 chunk_kb ? "KB" : "B");
27066 if (bitmap->file) {
27067 seq_printf(seq, ", file: ");
27068 - seq_path(seq, &bitmap->file->f_path, " \t\n");
27069 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27070 }
27071
27072 seq_printf(seq, "\n");
27073 @@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
27074 else {
27075 struct seq_file *p = file->private_data;
27076 p->private = mi;
27077 - mi->event = atomic_read(&md_event_count);
27078 + mi->event = atomic_read_unchecked(&md_event_count);
27079 }
27080 return error;
27081 }
27082 @@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
27083 /* always allow read */
27084 mask = POLLIN | POLLRDNORM;
27085
27086 - if (mi->event != atomic_read(&md_event_count))
27087 + if (mi->event != atomic_read_unchecked(&md_event_count))
27088 mask |= POLLERR | POLLPRI;
27089 return mask;
27090 }
27091 @@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
27092 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27093 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27094 (int)part_stat_read(&disk->part0, sectors[1]) -
27095 - atomic_read(&disk->sync_io);
27096 + atomic_read_unchecked(&disk->sync_io);
27097 /* sync IO will cause sync_io to increase before the disk_stats
27098 * as sync_io is counted when a request starts, and
27099 * disk_stats is counted when it completes.
27100 diff -urNp linux-3.0.4/drivers/md/md.h linux-3.0.4/drivers/md/md.h
27101 --- linux-3.0.4/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
27102 +++ linux-3.0.4/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
27103 @@ -97,13 +97,13 @@ struct mdk_rdev_s
27104 * only maintained for arrays that
27105 * support hot removal
27106 */
27107 - atomic_t read_errors; /* number of consecutive read errors that
27108 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
27109 * we have tried to ignore.
27110 */
27111 struct timespec last_read_error; /* monotonic time since our
27112 * last read error
27113 */
27114 - atomic_t corrected_errors; /* number of corrected read errors,
27115 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27116 * for reporting to userspace and storing
27117 * in superblock.
27118 */
27119 @@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
27120
27121 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27122 {
27123 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27124 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27125 }
27126
27127 struct mdk_personality
27128 diff -urNp linux-3.0.4/drivers/md/raid10.c linux-3.0.4/drivers/md/raid10.c
27129 --- linux-3.0.4/drivers/md/raid10.c 2011-07-21 22:17:23.000000000 -0400
27130 +++ linux-3.0.4/drivers/md/raid10.c 2011-08-23 21:47:55.000000000 -0400
27131 @@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
27132 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27133 set_bit(R10BIO_Uptodate, &r10_bio->state);
27134 else {
27135 - atomic_add(r10_bio->sectors,
27136 + atomic_add_unchecked(r10_bio->sectors,
27137 &conf->mirrors[d].rdev->corrected_errors);
27138 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27139 md_error(r10_bio->mddev,
27140 @@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
27141 {
27142 struct timespec cur_time_mon;
27143 unsigned long hours_since_last;
27144 - unsigned int read_errors = atomic_read(&rdev->read_errors);
27145 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27146
27147 ktime_get_ts(&cur_time_mon);
27148
27149 @@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
27150 * overflowing the shift of read_errors by hours_since_last.
27151 */
27152 if (hours_since_last >= 8 * sizeof(read_errors))
27153 - atomic_set(&rdev->read_errors, 0);
27154 + atomic_set_unchecked(&rdev->read_errors, 0);
27155 else
27156 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27157 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27158 }
27159
27160 /*
27161 @@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
27162 return;
27163
27164 check_decay_read_errors(mddev, rdev);
27165 - atomic_inc(&rdev->read_errors);
27166 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
27167 + atomic_inc_unchecked(&rdev->read_errors);
27168 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
27169 char b[BDEVNAME_SIZE];
27170 bdevname(rdev->bdev, b);
27171
27172 @@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
27173 "md/raid10:%s: %s: Raid device exceeded "
27174 "read_error threshold [cur %d:max %d]\n",
27175 mdname(mddev), b,
27176 - atomic_read(&rdev->read_errors), max_read_errors);
27177 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
27178 printk(KERN_NOTICE
27179 "md/raid10:%s: %s: Failing raid device\n",
27180 mdname(mddev), b);
27181 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
27182 test_bit(In_sync, &rdev->flags)) {
27183 atomic_inc(&rdev->nr_pending);
27184 rcu_read_unlock();
27185 - atomic_add(s, &rdev->corrected_errors);
27186 + atomic_add_unchecked(s, &rdev->corrected_errors);
27187 if (sync_page_io(rdev,
27188 r10_bio->devs[sl].addr +
27189 sect,
27190 diff -urNp linux-3.0.4/drivers/md/raid1.c linux-3.0.4/drivers/md/raid1.c
27191 --- linux-3.0.4/drivers/md/raid1.c 2011-07-21 22:17:23.000000000 -0400
27192 +++ linux-3.0.4/drivers/md/raid1.c 2011-08-23 21:47:55.000000000 -0400
27193 @@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
27194 rdev_dec_pending(rdev, mddev);
27195 md_error(mddev, rdev);
27196 } else
27197 - atomic_add(s, &rdev->corrected_errors);
27198 + atomic_add_unchecked(s, &rdev->corrected_errors);
27199 }
27200 d = start;
27201 while (d != r1_bio->read_disk) {
27202 @@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
27203 /* Well, this device is dead */
27204 md_error(mddev, rdev);
27205 else {
27206 - atomic_add(s, &rdev->corrected_errors);
27207 + atomic_add_unchecked(s, &rdev->corrected_errors);
27208 printk(KERN_INFO
27209 "md/raid1:%s: read error corrected "
27210 "(%d sectors at %llu on %s)\n",
27211 diff -urNp linux-3.0.4/drivers/md/raid5.c linux-3.0.4/drivers/md/raid5.c
27212 --- linux-3.0.4/drivers/md/raid5.c 2011-07-21 22:17:23.000000000 -0400
27213 +++ linux-3.0.4/drivers/md/raid5.c 2011-08-23 21:48:14.000000000 -0400
27214 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27215 bi->bi_next = NULL;
27216 if ((rw & WRITE) &&
27217 test_bit(R5_ReWrite, &sh->dev[i].flags))
27218 - atomic_add(STRIPE_SECTORS,
27219 + atomic_add_unchecked(STRIPE_SECTORS,
27220 &rdev->corrected_errors);
27221 generic_make_request(bi);
27222 } else {
27223 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27224 clear_bit(R5_ReadError, &sh->dev[i].flags);
27225 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27226 }
27227 - if (atomic_read(&conf->disks[i].rdev->read_errors))
27228 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
27229 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27230 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27231 } else {
27232 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27233 int retry = 0;
27234 rdev = conf->disks[i].rdev;
27235
27236 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27237 - atomic_inc(&rdev->read_errors);
27238 + atomic_inc_unchecked(&rdev->read_errors);
27239 if (conf->mddev->degraded >= conf->max_degraded)
27240 printk_rl(KERN_WARNING
27241 "md/raid:%s: read error not correctable "
27242 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27243 (unsigned long long)(sh->sector
27244 + rdev->data_offset),
27245 bdn);
27246 - else if (atomic_read(&rdev->read_errors)
27247 + else if (atomic_read_unchecked(&rdev->read_errors)
27248 > conf->max_nr_stripes)
27249 printk(KERN_WARNING
27250 "md/raid:%s: Too many read errors, failing device %s.\n",
27251 @@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
27252 sector_t r_sector;
27253 struct stripe_head sh2;
27254
27255 + pax_track_stack();
27256
27257 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27258 stripe = new_sector;
27259 diff -urNp linux-3.0.4/drivers/media/common/saa7146_hlp.c linux-3.0.4/drivers/media/common/saa7146_hlp.c
27260 --- linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
27261 +++ linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
27262 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
27263
27264 int x[32], y[32], w[32], h[32];
27265
27266 + pax_track_stack();
27267 +
27268 /* clear out memory */
27269 memset(&line_list[0], 0x00, sizeof(u32)*32);
27270 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27271 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27272 --- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
27273 +++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
27274 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27275 u8 buf[HOST_LINK_BUF_SIZE];
27276 int i;
27277
27278 + pax_track_stack();
27279 +
27280 dprintk("%s\n", __func__);
27281
27282 /* check if we have space for a link buf in the rx_buffer */
27283 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27284 unsigned long timeout;
27285 int written;
27286
27287 + pax_track_stack();
27288 +
27289 dprintk("%s\n", __func__);
27290
27291 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27292 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h
27293 --- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
27294 +++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
27295 @@ -68,12 +68,12 @@ struct dvb_demux_feed {
27296 union {
27297 struct dmx_ts_feed ts;
27298 struct dmx_section_feed sec;
27299 - } feed;
27300 + } __no_const feed;
27301
27302 union {
27303 dmx_ts_cb ts;
27304 dmx_section_cb sec;
27305 - } cb;
27306 + } __no_const cb;
27307
27308 struct dvb_demux *demux;
27309 void *priv;
27310 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c
27311 --- linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
27312 +++ linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
27313 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27314 const struct dvb_device *template, void *priv, int type)
27315 {
27316 struct dvb_device *dvbdev;
27317 - struct file_operations *dvbdevfops;
27318 + file_operations_no_const *dvbdevfops;
27319 struct device *clsdev;
27320 int minor;
27321 int id;
27322 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c
27323 --- linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
27324 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
27325 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27326 struct dib0700_adapter_state {
27327 int (*set_param_save) (struct dvb_frontend *,
27328 struct dvb_frontend_parameters *);
27329 -};
27330 +} __no_const;
27331
27332 static int dib7070_set_param_override(struct dvb_frontend *fe,
27333 struct dvb_frontend_parameters *fep)
27334 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27335 --- linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
27336 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
27337 @@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
27338 if (!buf)
27339 return -ENOMEM;
27340
27341 + pax_track_stack();
27342 +
27343 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27344 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27345 hx.addr, hx.len, hx.chk);
27346 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h
27347 --- linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
27348 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
27349 @@ -97,7 +97,7 @@
27350 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
27351
27352 struct dibusb_state {
27353 - struct dib_fe_xfer_ops ops;
27354 + dib_fe_xfer_ops_no_const ops;
27355 int mt2060_present;
27356 u8 tuner_addr;
27357 };
27358 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c
27359 --- linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
27360 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
27361 @@ -95,7 +95,7 @@ struct su3000_state {
27362
27363 struct s6x0_state {
27364 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27365 -};
27366 +} __no_const;
27367
27368 /* debug */
27369 static int dvb_usb_dw2102_debug;
27370 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c
27371 --- linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
27372 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
27373 @@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
27374 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
27375 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
27376
27377 + pax_track_stack();
27378
27379 data[0] = 0x8a;
27380 len_in = 1;
27381 @@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
27382 int ret = 0, len_in;
27383 u8 data[512] = {0};
27384
27385 + pax_track_stack();
27386 +
27387 data[0] = 0x0a;
27388 len_in = 1;
27389 info("FRM Firmware Cold Reset");
27390 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000.h linux-3.0.4/drivers/media/dvb/frontends/dib3000.h
27391 --- linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
27392 +++ linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-08-24 18:28:18.000000000 -0400
27393 @@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
27394 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27395 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27396 };
27397 +typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
27398
27399 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27400 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27401 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
27402 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
27403 #else
27404 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27405 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27406 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c
27407 --- linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
27408 +++ linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
27409 @@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
27410 static struct dvb_frontend_ops dib3000mb_ops;
27411
27412 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27413 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27414 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
27415 {
27416 struct dib3000_state* state = NULL;
27417
27418 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c
27419 --- linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
27420 +++ linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
27421 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27422 int ret = -1;
27423 int sync;
27424
27425 + pax_track_stack();
27426 +
27427 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27428
27429 fcp = 3000;
27430 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/or51211.c linux-3.0.4/drivers/media/dvb/frontends/or51211.c
27431 --- linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
27432 +++ linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
27433 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27434 u8 tudata[585];
27435 int i;
27436
27437 + pax_track_stack();
27438 +
27439 dprintk("Firmware is %zd bytes\n",fw->size);
27440
27441 /* Get eprom data */
27442 diff -urNp linux-3.0.4/drivers/media/video/cx18/cx18-driver.c linux-3.0.4/drivers/media/video/cx18/cx18-driver.c
27443 --- linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
27444 +++ linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
27445 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27446 struct i2c_client c;
27447 u8 eedata[256];
27448
27449 + pax_track_stack();
27450 +
27451 memset(&c, 0, sizeof(c));
27452 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27453 c.adapter = &cx->i2c_adap[0];
27454 diff -urNp linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c
27455 --- linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
27456 +++ linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
27457 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27458 bool handle = false;
27459 struct ir_raw_event ir_core_event[64];
27460
27461 + pax_track_stack();
27462 +
27463 do {
27464 num = 0;
27465 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27466 diff -urNp linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27467 --- linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
27468 +++ linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
27469 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27470 u8 *eeprom;
27471 struct tveeprom tvdata;
27472
27473 + pax_track_stack();
27474 +
27475 memset(&tvdata,0,sizeof(tvdata));
27476
27477 eeprom = pvr2_eeprom_fetch(hdw);
27478 diff -urNp linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c
27479 --- linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
27480 +++ linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
27481 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27482 unsigned char localPAT[256];
27483 unsigned char localPMT[256];
27484
27485 + pax_track_stack();
27486 +
27487 /* Set video format - must be done first as it resets other settings */
27488 set_reg8(client, 0x41, h->video_format);
27489
27490 diff -urNp linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c
27491 --- linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
27492 +++ linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
27493 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27494 u8 tmp[512];
27495 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27496
27497 + pax_track_stack();
27498 +
27499 /* While any outstand message on the bus exists... */
27500 do {
27501
27502 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27503 u8 tmp[512];
27504 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27505
27506 + pax_track_stack();
27507 +
27508 while (loop) {
27509
27510 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27511 diff -urNp linux-3.0.4/drivers/media/video/timblogiw.c linux-3.0.4/drivers/media/video/timblogiw.c
27512 --- linux-3.0.4/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
27513 +++ linux-3.0.4/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
27514 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
27515
27516 /* Platform device functions */
27517
27518 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27519 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
27520 .vidioc_querycap = timblogiw_querycap,
27521 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27522 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27523 diff -urNp linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c
27524 --- linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
27525 +++ linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
27526 @@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
27527 unsigned char rv, gv, bv;
27528 static unsigned char *Y, *U, *V;
27529
27530 + pax_track_stack();
27531 +
27532 frame = usbvision->cur_frame;
27533 image_size = frame->frmwidth * frame->frmheight;
27534 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27535 diff -urNp linux-3.0.4/drivers/media/video/videobuf-dma-sg.c linux-3.0.4/drivers/media/video/videobuf-dma-sg.c
27536 --- linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
27537 +++ linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
27538 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27539 {
27540 struct videobuf_queue q;
27541
27542 + pax_track_stack();
27543 +
27544 /* Required to make generic handler to call __videobuf_alloc */
27545 q.int_ops = &sg_ops;
27546
27547 diff -urNp linux-3.0.4/drivers/message/fusion/mptbase.c linux-3.0.4/drivers/message/fusion/mptbase.c
27548 --- linux-3.0.4/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
27549 +++ linux-3.0.4/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
27550 @@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
27551 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27552 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27553
27554 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27555 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27556 +#else
27557 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27558 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27559 +#endif
27560 +
27561 /*
27562 * Rounding UP to nearest 4-kB boundary here...
27563 */
27564 diff -urNp linux-3.0.4/drivers/message/fusion/mptsas.c linux-3.0.4/drivers/message/fusion/mptsas.c
27565 --- linux-3.0.4/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
27566 +++ linux-3.0.4/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
27567 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27568 return 0;
27569 }
27570
27571 +static inline void
27572 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27573 +{
27574 + if (phy_info->port_details) {
27575 + phy_info->port_details->rphy = rphy;
27576 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27577 + ioc->name, rphy));
27578 + }
27579 +
27580 + if (rphy) {
27581 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27582 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27583 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27584 + ioc->name, rphy, rphy->dev.release));
27585 + }
27586 +}
27587 +
27588 /* no mutex */
27589 static void
27590 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27591 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27592 return NULL;
27593 }
27594
27595 -static inline void
27596 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27597 -{
27598 - if (phy_info->port_details) {
27599 - phy_info->port_details->rphy = rphy;
27600 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27601 - ioc->name, rphy));
27602 - }
27603 -
27604 - if (rphy) {
27605 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27606 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27607 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27608 - ioc->name, rphy, rphy->dev.release));
27609 - }
27610 -}
27611 -
27612 static inline struct sas_port *
27613 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27614 {
27615 diff -urNp linux-3.0.4/drivers/message/fusion/mptscsih.c linux-3.0.4/drivers/message/fusion/mptscsih.c
27616 --- linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
27617 +++ linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
27618 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27619
27620 h = shost_priv(SChost);
27621
27622 - if (h) {
27623 - if (h->info_kbuf == NULL)
27624 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27625 - return h->info_kbuf;
27626 - h->info_kbuf[0] = '\0';
27627 + if (!h)
27628 + return NULL;
27629
27630 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27631 - h->info_kbuf[size-1] = '\0';
27632 - }
27633 + if (h->info_kbuf == NULL)
27634 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27635 + return h->info_kbuf;
27636 + h->info_kbuf[0] = '\0';
27637 +
27638 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27639 + h->info_kbuf[size-1] = '\0';
27640
27641 return h->info_kbuf;
27642 }
27643 diff -urNp linux-3.0.4/drivers/message/i2o/i2o_config.c linux-3.0.4/drivers/message/i2o/i2o_config.c
27644 --- linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
27645 +++ linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
27646 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27647 struct i2o_message *msg;
27648 unsigned int iop;
27649
27650 + pax_track_stack();
27651 +
27652 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27653 return -EFAULT;
27654
27655 diff -urNp linux-3.0.4/drivers/message/i2o/i2o_proc.c linux-3.0.4/drivers/message/i2o/i2o_proc.c
27656 --- linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
27657 +++ linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
27658 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27659 "Array Controller Device"
27660 };
27661
27662 -static char *chtostr(u8 * chars, int n)
27663 -{
27664 - char tmp[256];
27665 - tmp[0] = 0;
27666 - return strncat(tmp, (char *)chars, n);
27667 -}
27668 -
27669 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27670 char *group)
27671 {
27672 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27673
27674 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27675 seq_printf(seq, "%-#8x", ddm_table.module_id);
27676 - seq_printf(seq, "%-29s",
27677 - chtostr(ddm_table.module_name_version, 28));
27678 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27679 seq_printf(seq, "%9d ", ddm_table.data_size);
27680 seq_printf(seq, "%8d", ddm_table.code_size);
27681
27682 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27683
27684 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27685 seq_printf(seq, "%-#8x", dst->module_id);
27686 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27687 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27688 + seq_printf(seq, "%-.28s", dst->module_name_version);
27689 + seq_printf(seq, "%-.8s", dst->date);
27690 seq_printf(seq, "%8d ", dst->module_size);
27691 seq_printf(seq, "%8d ", dst->mpb_size);
27692 seq_printf(seq, "0x%04x", dst->module_flags);
27693 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27694 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27695 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27696 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27697 - seq_printf(seq, "Vendor info : %s\n",
27698 - chtostr((u8 *) (work32 + 2), 16));
27699 - seq_printf(seq, "Product info : %s\n",
27700 - chtostr((u8 *) (work32 + 6), 16));
27701 - seq_printf(seq, "Description : %s\n",
27702 - chtostr((u8 *) (work32 + 10), 16));
27703 - seq_printf(seq, "Product rev. : %s\n",
27704 - chtostr((u8 *) (work32 + 14), 8));
27705 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27706 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27707 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27708 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27709
27710 seq_printf(seq, "Serial number : ");
27711 print_serial_number(seq, (u8 *) (work32 + 16),
27712 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27713 }
27714
27715 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27716 - seq_printf(seq, "Module name : %s\n",
27717 - chtostr(result.module_name, 24));
27718 - seq_printf(seq, "Module revision : %s\n",
27719 - chtostr(result.module_rev, 8));
27720 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
27721 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27722
27723 seq_printf(seq, "Serial number : ");
27724 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27725 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27726 return 0;
27727 }
27728
27729 - seq_printf(seq, "Device name : %s\n",
27730 - chtostr(result.device_name, 64));
27731 - seq_printf(seq, "Service name : %s\n",
27732 - chtostr(result.service_name, 64));
27733 - seq_printf(seq, "Physical name : %s\n",
27734 - chtostr(result.physical_location, 64));
27735 - seq_printf(seq, "Instance number : %s\n",
27736 - chtostr(result.instance_number, 4));
27737 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
27738 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
27739 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27740 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27741
27742 return 0;
27743 }
27744 diff -urNp linux-3.0.4/drivers/message/i2o/iop.c linux-3.0.4/drivers/message/i2o/iop.c
27745 --- linux-3.0.4/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
27746 +++ linux-3.0.4/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
27747 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27748
27749 spin_lock_irqsave(&c->context_list_lock, flags);
27750
27751 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27752 - atomic_inc(&c->context_list_counter);
27753 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27754 + atomic_inc_unchecked(&c->context_list_counter);
27755
27756 - entry->context = atomic_read(&c->context_list_counter);
27757 + entry->context = atomic_read_unchecked(&c->context_list_counter);
27758
27759 list_add(&entry->list, &c->context_list);
27760
27761 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27762
27763 #if BITS_PER_LONG == 64
27764 spin_lock_init(&c->context_list_lock);
27765 - atomic_set(&c->context_list_counter, 0);
27766 + atomic_set_unchecked(&c->context_list_counter, 0);
27767 INIT_LIST_HEAD(&c->context_list);
27768 #endif
27769
27770 diff -urNp linux-3.0.4/drivers/mfd/abx500-core.c linux-3.0.4/drivers/mfd/abx500-core.c
27771 --- linux-3.0.4/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
27772 +++ linux-3.0.4/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
27773 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27774
27775 struct abx500_device_entry {
27776 struct list_head list;
27777 - struct abx500_ops ops;
27778 + abx500_ops_no_const ops;
27779 struct device *dev;
27780 };
27781
27782 diff -urNp linux-3.0.4/drivers/mfd/janz-cmodio.c linux-3.0.4/drivers/mfd/janz-cmodio.c
27783 --- linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
27784 +++ linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
27785 @@ -13,6 +13,7 @@
27786
27787 #include <linux/kernel.h>
27788 #include <linux/module.h>
27789 +#include <linux/slab.h>
27790 #include <linux/init.h>
27791 #include <linux/pci.h>
27792 #include <linux/interrupt.h>
27793 diff -urNp linux-3.0.4/drivers/mfd/wm8350-i2c.c linux-3.0.4/drivers/mfd/wm8350-i2c.c
27794 --- linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
27795 +++ linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
27796 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27797 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27798 int ret;
27799
27800 + pax_track_stack();
27801 +
27802 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27803 return -EINVAL;
27804
27805 diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c
27806 --- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-07-21 22:17:23.000000000 -0400
27807 +++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-23 21:47:55.000000000 -0400
27808 @@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27809 * the lid is closed. This leads to interrupts as soon as a little move
27810 * is done.
27811 */
27812 - atomic_inc(&lis3_dev.count);
27813 + atomic_inc_unchecked(&lis3_dev.count);
27814
27815 wake_up_interruptible(&lis3_dev.misc_wait);
27816 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27817 @@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27818 if (lis3_dev.pm_dev)
27819 pm_runtime_get_sync(lis3_dev.pm_dev);
27820
27821 - atomic_set(&lis3_dev.count, 0);
27822 + atomic_set_unchecked(&lis3_dev.count, 0);
27823 return 0;
27824 }
27825
27826 @@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27827 add_wait_queue(&lis3_dev.misc_wait, &wait);
27828 while (true) {
27829 set_current_state(TASK_INTERRUPTIBLE);
27830 - data = atomic_xchg(&lis3_dev.count, 0);
27831 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27832 if (data)
27833 break;
27834
27835 @@ -583,7 +583,7 @@ out:
27836 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27837 {
27838 poll_wait(file, &lis3_dev.misc_wait, wait);
27839 - if (atomic_read(&lis3_dev.count))
27840 + if (atomic_read_unchecked(&lis3_dev.count))
27841 return POLLIN | POLLRDNORM;
27842 return 0;
27843 }
27844 diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h
27845 --- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
27846 +++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
27847 @@ -265,7 +265,7 @@ struct lis3lv02d {
27848 struct input_polled_dev *idev; /* input device */
27849 struct platform_device *pdev; /* platform device */
27850 struct regulator_bulk_data regulators[2];
27851 - atomic_t count; /* interrupt count after last read */
27852 + atomic_unchecked_t count; /* interrupt count after last read */
27853 union axis_conversion ac; /* hw -> logical axis */
27854 int mapped_btns[3];
27855
27856 diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c
27857 --- linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
27858 +++ linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
27859 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27860 unsigned long nsec;
27861
27862 nsec = CLKS2NSEC(clks);
27863 - atomic_long_inc(&mcs_op_statistics[op].count);
27864 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
27865 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
27866 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
27867 if (mcs_op_statistics[op].max < nsec)
27868 mcs_op_statistics[op].max = nsec;
27869 }
27870 diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c
27871 --- linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
27872 +++ linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
27873 @@ -32,9 +32,9 @@
27874
27875 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
27876
27877 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
27878 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
27879 {
27880 - unsigned long val = atomic_long_read(v);
27881 + unsigned long val = atomic_long_read_unchecked(v);
27882
27883 seq_printf(s, "%16lu %s\n", val, id);
27884 }
27885 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
27886
27887 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
27888 for (op = 0; op < mcsop_last; op++) {
27889 - count = atomic_long_read(&mcs_op_statistics[op].count);
27890 - total = atomic_long_read(&mcs_op_statistics[op].total);
27891 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
27892 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
27893 max = mcs_op_statistics[op].max;
27894 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
27895 count ? total / count : 0, max);
27896 diff -urNp linux-3.0.4/drivers/misc/sgi-gru/grutables.h linux-3.0.4/drivers/misc/sgi-gru/grutables.h
27897 --- linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
27898 +++ linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
27899 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
27900 * GRU statistics.
27901 */
27902 struct gru_stats_s {
27903 - atomic_long_t vdata_alloc;
27904 - atomic_long_t vdata_free;
27905 - atomic_long_t gts_alloc;
27906 - atomic_long_t gts_free;
27907 - atomic_long_t gms_alloc;
27908 - atomic_long_t gms_free;
27909 - atomic_long_t gts_double_allocate;
27910 - atomic_long_t assign_context;
27911 - atomic_long_t assign_context_failed;
27912 - atomic_long_t free_context;
27913 - atomic_long_t load_user_context;
27914 - atomic_long_t load_kernel_context;
27915 - atomic_long_t lock_kernel_context;
27916 - atomic_long_t unlock_kernel_context;
27917 - atomic_long_t steal_user_context;
27918 - atomic_long_t steal_kernel_context;
27919 - atomic_long_t steal_context_failed;
27920 - atomic_long_t nopfn;
27921 - atomic_long_t asid_new;
27922 - atomic_long_t asid_next;
27923 - atomic_long_t asid_wrap;
27924 - atomic_long_t asid_reuse;
27925 - atomic_long_t intr;
27926 - atomic_long_t intr_cbr;
27927 - atomic_long_t intr_tfh;
27928 - atomic_long_t intr_spurious;
27929 - atomic_long_t intr_mm_lock_failed;
27930 - atomic_long_t call_os;
27931 - atomic_long_t call_os_wait_queue;
27932 - atomic_long_t user_flush_tlb;
27933 - atomic_long_t user_unload_context;
27934 - atomic_long_t user_exception;
27935 - atomic_long_t set_context_option;
27936 - atomic_long_t check_context_retarget_intr;
27937 - atomic_long_t check_context_unload;
27938 - atomic_long_t tlb_dropin;
27939 - atomic_long_t tlb_preload_page;
27940 - atomic_long_t tlb_dropin_fail_no_asid;
27941 - atomic_long_t tlb_dropin_fail_upm;
27942 - atomic_long_t tlb_dropin_fail_invalid;
27943 - atomic_long_t tlb_dropin_fail_range_active;
27944 - atomic_long_t tlb_dropin_fail_idle;
27945 - atomic_long_t tlb_dropin_fail_fmm;
27946 - atomic_long_t tlb_dropin_fail_no_exception;
27947 - atomic_long_t tfh_stale_on_fault;
27948 - atomic_long_t mmu_invalidate_range;
27949 - atomic_long_t mmu_invalidate_page;
27950 - atomic_long_t flush_tlb;
27951 - atomic_long_t flush_tlb_gru;
27952 - atomic_long_t flush_tlb_gru_tgh;
27953 - atomic_long_t flush_tlb_gru_zero_asid;
27954 -
27955 - atomic_long_t copy_gpa;
27956 - atomic_long_t read_gpa;
27957 -
27958 - atomic_long_t mesq_receive;
27959 - atomic_long_t mesq_receive_none;
27960 - atomic_long_t mesq_send;
27961 - atomic_long_t mesq_send_failed;
27962 - atomic_long_t mesq_noop;
27963 - atomic_long_t mesq_send_unexpected_error;
27964 - atomic_long_t mesq_send_lb_overflow;
27965 - atomic_long_t mesq_send_qlimit_reached;
27966 - atomic_long_t mesq_send_amo_nacked;
27967 - atomic_long_t mesq_send_put_nacked;
27968 - atomic_long_t mesq_page_overflow;
27969 - atomic_long_t mesq_qf_locked;
27970 - atomic_long_t mesq_qf_noop_not_full;
27971 - atomic_long_t mesq_qf_switch_head_failed;
27972 - atomic_long_t mesq_qf_unexpected_error;
27973 - atomic_long_t mesq_noop_unexpected_error;
27974 - atomic_long_t mesq_noop_lb_overflow;
27975 - atomic_long_t mesq_noop_qlimit_reached;
27976 - atomic_long_t mesq_noop_amo_nacked;
27977 - atomic_long_t mesq_noop_put_nacked;
27978 - atomic_long_t mesq_noop_page_overflow;
27979 + atomic_long_unchecked_t vdata_alloc;
27980 + atomic_long_unchecked_t vdata_free;
27981 + atomic_long_unchecked_t gts_alloc;
27982 + atomic_long_unchecked_t gts_free;
27983 + atomic_long_unchecked_t gms_alloc;
27984 + atomic_long_unchecked_t gms_free;
27985 + atomic_long_unchecked_t gts_double_allocate;
27986 + atomic_long_unchecked_t assign_context;
27987 + atomic_long_unchecked_t assign_context_failed;
27988 + atomic_long_unchecked_t free_context;
27989 + atomic_long_unchecked_t load_user_context;
27990 + atomic_long_unchecked_t load_kernel_context;
27991 + atomic_long_unchecked_t lock_kernel_context;
27992 + atomic_long_unchecked_t unlock_kernel_context;
27993 + atomic_long_unchecked_t steal_user_context;
27994 + atomic_long_unchecked_t steal_kernel_context;
27995 + atomic_long_unchecked_t steal_context_failed;
27996 + atomic_long_unchecked_t nopfn;
27997 + atomic_long_unchecked_t asid_new;
27998 + atomic_long_unchecked_t asid_next;
27999 + atomic_long_unchecked_t asid_wrap;
28000 + atomic_long_unchecked_t asid_reuse;
28001 + atomic_long_unchecked_t intr;
28002 + atomic_long_unchecked_t intr_cbr;
28003 + atomic_long_unchecked_t intr_tfh;
28004 + atomic_long_unchecked_t intr_spurious;
28005 + atomic_long_unchecked_t intr_mm_lock_failed;
28006 + atomic_long_unchecked_t call_os;
28007 + atomic_long_unchecked_t call_os_wait_queue;
28008 + atomic_long_unchecked_t user_flush_tlb;
28009 + atomic_long_unchecked_t user_unload_context;
28010 + atomic_long_unchecked_t user_exception;
28011 + atomic_long_unchecked_t set_context_option;
28012 + atomic_long_unchecked_t check_context_retarget_intr;
28013 + atomic_long_unchecked_t check_context_unload;
28014 + atomic_long_unchecked_t tlb_dropin;
28015 + atomic_long_unchecked_t tlb_preload_page;
28016 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28017 + atomic_long_unchecked_t tlb_dropin_fail_upm;
28018 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
28019 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
28020 + atomic_long_unchecked_t tlb_dropin_fail_idle;
28021 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
28022 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28023 + atomic_long_unchecked_t tfh_stale_on_fault;
28024 + atomic_long_unchecked_t mmu_invalidate_range;
28025 + atomic_long_unchecked_t mmu_invalidate_page;
28026 + atomic_long_unchecked_t flush_tlb;
28027 + atomic_long_unchecked_t flush_tlb_gru;
28028 + atomic_long_unchecked_t flush_tlb_gru_tgh;
28029 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28030 +
28031 + atomic_long_unchecked_t copy_gpa;
28032 + atomic_long_unchecked_t read_gpa;
28033 +
28034 + atomic_long_unchecked_t mesq_receive;
28035 + atomic_long_unchecked_t mesq_receive_none;
28036 + atomic_long_unchecked_t mesq_send;
28037 + atomic_long_unchecked_t mesq_send_failed;
28038 + atomic_long_unchecked_t mesq_noop;
28039 + atomic_long_unchecked_t mesq_send_unexpected_error;
28040 + atomic_long_unchecked_t mesq_send_lb_overflow;
28041 + atomic_long_unchecked_t mesq_send_qlimit_reached;
28042 + atomic_long_unchecked_t mesq_send_amo_nacked;
28043 + atomic_long_unchecked_t mesq_send_put_nacked;
28044 + atomic_long_unchecked_t mesq_page_overflow;
28045 + atomic_long_unchecked_t mesq_qf_locked;
28046 + atomic_long_unchecked_t mesq_qf_noop_not_full;
28047 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
28048 + atomic_long_unchecked_t mesq_qf_unexpected_error;
28049 + atomic_long_unchecked_t mesq_noop_unexpected_error;
28050 + atomic_long_unchecked_t mesq_noop_lb_overflow;
28051 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
28052 + atomic_long_unchecked_t mesq_noop_amo_nacked;
28053 + atomic_long_unchecked_t mesq_noop_put_nacked;
28054 + atomic_long_unchecked_t mesq_noop_page_overflow;
28055
28056 };
28057
28058 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28059 tghop_invalidate, mcsop_last};
28060
28061 struct mcs_op_statistic {
28062 - atomic_long_t count;
28063 - atomic_long_t total;
28064 + atomic_long_unchecked_t count;
28065 + atomic_long_unchecked_t total;
28066 unsigned long max;
28067 };
28068
28069 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28070
28071 #define STAT(id) do { \
28072 if (gru_options & OPT_STATS) \
28073 - atomic_long_inc(&gru_stats.id); \
28074 + atomic_long_inc_unchecked(&gru_stats.id); \
28075 } while (0)
28076
28077 #ifdef CONFIG_SGI_GRU_DEBUG
28078 diff -urNp linux-3.0.4/drivers/misc/sgi-xp/xp.h linux-3.0.4/drivers/misc/sgi-xp/xp.h
28079 --- linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
28080 +++ linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
28081 @@ -289,7 +289,7 @@ struct xpc_interface {
28082 xpc_notify_func, void *);
28083 void (*received) (short, int, void *);
28084 enum xp_retval (*partid_to_nasids) (short, void *);
28085 -};
28086 +} __no_const;
28087
28088 extern struct xpc_interface xpc_interface;
28089
28090 diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c
28091 --- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
28092 +++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
28093 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28094 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28095 unsigned long timeo = jiffies + HZ;
28096
28097 + pax_track_stack();
28098 +
28099 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28100 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28101 goto sleep;
28102 @@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
28103 unsigned long initial_adr;
28104 int initial_len = len;
28105
28106 + pax_track_stack();
28107 +
28108 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28109 adr += chip->start;
28110 initial_adr = adr;
28111 @@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
28112 int retries = 3;
28113 int ret;
28114
28115 + pax_track_stack();
28116 +
28117 adr += chip->start;
28118
28119 retry:
28120 diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c
28121 --- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
28122 +++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
28123 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28124 unsigned long cmd_addr;
28125 struct cfi_private *cfi = map->fldrv_priv;
28126
28127 + pax_track_stack();
28128 +
28129 adr += chip->start;
28130
28131 /* Ensure cmd read/writes are aligned. */
28132 @@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
28133 DECLARE_WAITQUEUE(wait, current);
28134 int wbufsize, z;
28135
28136 + pax_track_stack();
28137 +
28138 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28139 if (adr & (map_bankwidth(map)-1))
28140 return -EINVAL;
28141 @@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
28142 DECLARE_WAITQUEUE(wait, current);
28143 int ret = 0;
28144
28145 + pax_track_stack();
28146 +
28147 adr += chip->start;
28148
28149 /* Let's determine this according to the interleave only once */
28150 @@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
28151 unsigned long timeo = jiffies + HZ;
28152 DECLARE_WAITQUEUE(wait, current);
28153
28154 + pax_track_stack();
28155 +
28156 adr += chip->start;
28157
28158 /* Let's determine this according to the interleave only once */
28159 @@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
28160 unsigned long timeo = jiffies + HZ;
28161 DECLARE_WAITQUEUE(wait, current);
28162
28163 + pax_track_stack();
28164 +
28165 adr += chip->start;
28166
28167 /* Let's determine this according to the interleave only once */
28168 diff -urNp linux-3.0.4/drivers/mtd/devices/doc2000.c linux-3.0.4/drivers/mtd/devices/doc2000.c
28169 --- linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
28170 +++ linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
28171 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28172
28173 /* The ECC will not be calculated correctly if less than 512 is written */
28174 /* DBB-
28175 - if (len != 0x200 && eccbuf)
28176 + if (len != 0x200)
28177 printk(KERN_WARNING
28178 "ECC needs a full sector write (adr: %lx size %lx)\n",
28179 (long) to, (long) len);
28180 diff -urNp linux-3.0.4/drivers/mtd/devices/doc2001.c linux-3.0.4/drivers/mtd/devices/doc2001.c
28181 --- linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
28182 +++ linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
28183 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28184 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28185
28186 /* Don't allow read past end of device */
28187 - if (from >= this->totlen)
28188 + if (from >= this->totlen || !len)
28189 return -EINVAL;
28190
28191 /* Don't allow a single read to cross a 512-byte block boundary */
28192 diff -urNp linux-3.0.4/drivers/mtd/ftl.c linux-3.0.4/drivers/mtd/ftl.c
28193 --- linux-3.0.4/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
28194 +++ linux-3.0.4/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
28195 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28196 loff_t offset;
28197 uint16_t srcunitswap = cpu_to_le16(srcunit);
28198
28199 + pax_track_stack();
28200 +
28201 eun = &part->EUNInfo[srcunit];
28202 xfer = &part->XferInfo[xferunit];
28203 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28204 diff -urNp linux-3.0.4/drivers/mtd/inftlcore.c linux-3.0.4/drivers/mtd/inftlcore.c
28205 --- linux-3.0.4/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
28206 +++ linux-3.0.4/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
28207 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28208 struct inftl_oob oob;
28209 size_t retlen;
28210
28211 + pax_track_stack();
28212 +
28213 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28214 "pending=%d)\n", inftl, thisVUC, pendingblock);
28215
28216 diff -urNp linux-3.0.4/drivers/mtd/inftlmount.c linux-3.0.4/drivers/mtd/inftlmount.c
28217 --- linux-3.0.4/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
28218 +++ linux-3.0.4/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
28219 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28220 struct INFTLPartition *ip;
28221 size_t retlen;
28222
28223 + pax_track_stack();
28224 +
28225 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28226
28227 /*
28228 diff -urNp linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c
28229 --- linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
28230 +++ linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
28231 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28232 {
28233 map_word pfow_val[4];
28234
28235 + pax_track_stack();
28236 +
28237 /* Check identification string */
28238 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28239 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28240 diff -urNp linux-3.0.4/drivers/mtd/mtdchar.c linux-3.0.4/drivers/mtd/mtdchar.c
28241 --- linux-3.0.4/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
28242 +++ linux-3.0.4/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
28243 @@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
28244 u_long size;
28245 struct mtd_info_user info;
28246
28247 + pax_track_stack();
28248 +
28249 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28250
28251 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28252 diff -urNp linux-3.0.4/drivers/mtd/nand/denali.c linux-3.0.4/drivers/mtd/nand/denali.c
28253 --- linux-3.0.4/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
28254 +++ linux-3.0.4/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
28255 @@ -26,6 +26,7 @@
28256 #include <linux/pci.h>
28257 #include <linux/mtd/mtd.h>
28258 #include <linux/module.h>
28259 +#include <linux/slab.h>
28260
28261 #include "denali.h"
28262
28263 diff -urNp linux-3.0.4/drivers/mtd/nftlcore.c linux-3.0.4/drivers/mtd/nftlcore.c
28264 --- linux-3.0.4/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
28265 +++ linux-3.0.4/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
28266 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28267 int inplace = 1;
28268 size_t retlen;
28269
28270 + pax_track_stack();
28271 +
28272 memset(BlockMap, 0xff, sizeof(BlockMap));
28273 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28274
28275 diff -urNp linux-3.0.4/drivers/mtd/nftlmount.c linux-3.0.4/drivers/mtd/nftlmount.c
28276 --- linux-3.0.4/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
28277 +++ linux-3.0.4/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
28278 @@ -24,6 +24,7 @@
28279 #include <asm/errno.h>
28280 #include <linux/delay.h>
28281 #include <linux/slab.h>
28282 +#include <linux/sched.h>
28283 #include <linux/mtd/mtd.h>
28284 #include <linux/mtd/nand.h>
28285 #include <linux/mtd/nftl.h>
28286 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28287 struct mtd_info *mtd = nftl->mbd.mtd;
28288 unsigned int i;
28289
28290 + pax_track_stack();
28291 +
28292 /* Assume logical EraseSize == physical erasesize for starting the scan.
28293 We'll sort it out later if we find a MediaHeader which says otherwise */
28294 /* Actually, we won't. The new DiskOnChip driver has already scanned
28295 diff -urNp linux-3.0.4/drivers/mtd/ubi/build.c linux-3.0.4/drivers/mtd/ubi/build.c
28296 --- linux-3.0.4/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
28297 +++ linux-3.0.4/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
28298 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28299 static int __init bytes_str_to_int(const char *str)
28300 {
28301 char *endp;
28302 - unsigned long result;
28303 + unsigned long result, scale = 1;
28304
28305 result = simple_strtoul(str, &endp, 0);
28306 if (str == endp || result >= INT_MAX) {
28307 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28308
28309 switch (*endp) {
28310 case 'G':
28311 - result *= 1024;
28312 + scale *= 1024;
28313 case 'M':
28314 - result *= 1024;
28315 + scale *= 1024;
28316 case 'K':
28317 - result *= 1024;
28318 + scale *= 1024;
28319 if (endp[1] == 'i' && endp[2] == 'B')
28320 endp += 2;
28321 case '\0':
28322 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28323 return -EINVAL;
28324 }
28325
28326 - return result;
28327 + if ((intoverflow_t)result*scale >= INT_MAX) {
28328 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28329 + str);
28330 + return -EINVAL;
28331 + }
28332 +
28333 + return result*scale;
28334 }
28335
28336 /**
28337 diff -urNp linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c
28338 --- linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
28339 +++ linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
28340 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28341 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28342 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28343
28344 -static struct bfa_ioc_hwif nw_hwif_ct;
28345 +static struct bfa_ioc_hwif nw_hwif_ct = {
28346 + .ioc_pll_init = bfa_ioc_ct_pll_init,
28347 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28348 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28349 + .ioc_reg_init = bfa_ioc_ct_reg_init,
28350 + .ioc_map_port = bfa_ioc_ct_map_port,
28351 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28352 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28353 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28354 + .ioc_sync_start = bfa_ioc_ct_sync_start,
28355 + .ioc_sync_join = bfa_ioc_ct_sync_join,
28356 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28357 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28358 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
28359 +};
28360
28361 /**
28362 * Called from bfa_ioc_attach() to map asic specific calls.
28363 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28364 void
28365 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28366 {
28367 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28368 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28369 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28370 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28371 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28372 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28373 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28374 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28375 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28376 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28377 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28378 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28379 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28380 -
28381 ioc->ioc_hwif = &nw_hwif_ct;
28382 }
28383
28384 diff -urNp linux-3.0.4/drivers/net/bna/bnad.c linux-3.0.4/drivers/net/bna/bnad.c
28385 --- linux-3.0.4/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
28386 +++ linux-3.0.4/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
28387 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28388 struct bna_intr_info *intr_info =
28389 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28390 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28391 - struct bna_tx_event_cbfn tx_cbfn;
28392 + static struct bna_tx_event_cbfn tx_cbfn = {
28393 + /* Initialize the tx event handlers */
28394 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
28395 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28396 + .tx_stall_cbfn = bnad_cb_tx_stall,
28397 + .tx_resume_cbfn = bnad_cb_tx_resume,
28398 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28399 + };
28400 struct bna_tx *tx;
28401 unsigned long flags;
28402
28403 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28404 tx_config->txq_depth = bnad->txq_depth;
28405 tx_config->tx_type = BNA_TX_T_REGULAR;
28406
28407 - /* Initialize the tx event handlers */
28408 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28409 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28410 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28411 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28412 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28413 -
28414 /* Get BNA's resource requirement for one tx object */
28415 spin_lock_irqsave(&bnad->bna_lock, flags);
28416 bna_tx_res_req(bnad->num_txq_per_tx,
28417 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28418 struct bna_intr_info *intr_info =
28419 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28420 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28421 - struct bna_rx_event_cbfn rx_cbfn;
28422 + static struct bna_rx_event_cbfn rx_cbfn = {
28423 + /* Initialize the Rx event handlers */
28424 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
28425 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28426 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
28427 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28428 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28429 + .rx_post_cbfn = bnad_cb_rx_post
28430 + };
28431 struct bna_rx *rx;
28432 unsigned long flags;
28433
28434 /* Initialize the Rx object configuration */
28435 bnad_init_rx_config(bnad, rx_config);
28436
28437 - /* Initialize the Rx event handlers */
28438 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28439 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28440 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28441 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28442 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28443 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28444 -
28445 /* Get BNA's resource requirement for one Rx object */
28446 spin_lock_irqsave(&bnad->bna_lock, flags);
28447 bna_rx_res_req(rx_config, res_info);
28448 diff -urNp linux-3.0.4/drivers/net/bnx2.c linux-3.0.4/drivers/net/bnx2.c
28449 --- linux-3.0.4/drivers/net/bnx2.c 2011-07-21 22:17:23.000000000 -0400
28450 +++ linux-3.0.4/drivers/net/bnx2.c 2011-08-23 21:48:14.000000000 -0400
28451 @@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28452 int rc = 0;
28453 u32 magic, csum;
28454
28455 + pax_track_stack();
28456 +
28457 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28458 goto test_nvram_done;
28459
28460 diff -urNp linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c
28461 --- linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
28462 +++ linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
28463 @@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
28464 int i, rc;
28465 u32 magic, crc;
28466
28467 + pax_track_stack();
28468 +
28469 if (BP_NOMCP(bp))
28470 return 0;
28471
28472 diff -urNp linux-3.0.4/drivers/net/cxgb3/l2t.h linux-3.0.4/drivers/net/cxgb3/l2t.h
28473 --- linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
28474 +++ linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
28475 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28476 */
28477 struct l2t_skb_cb {
28478 arp_failure_handler_func arp_failure_handler;
28479 -};
28480 +} __no_const;
28481
28482 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28483
28484 diff -urNp linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c
28485 --- linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
28486 +++ linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
28487 @@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
28488 unsigned int nchan = adap->params.nports;
28489 struct msix_entry entries[MAX_INGQ + 1];
28490
28491 + pax_track_stack();
28492 +
28493 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28494 entries[i].entry = i;
28495
28496 diff -urNp linux-3.0.4/drivers/net/cxgb4/t4_hw.c linux-3.0.4/drivers/net/cxgb4/t4_hw.c
28497 --- linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
28498 +++ linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
28499 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28500 u8 vpd[VPD_LEN], csum;
28501 unsigned int vpdr_len, kw_offset, id_len;
28502
28503 + pax_track_stack();
28504 +
28505 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28506 if (ret < 0)
28507 return ret;
28508 diff -urNp linux-3.0.4/drivers/net/e1000e/82571.c linux-3.0.4/drivers/net/e1000e/82571.c
28509 --- linux-3.0.4/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
28510 +++ linux-3.0.4/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
28511 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28512 {
28513 struct e1000_hw *hw = &adapter->hw;
28514 struct e1000_mac_info *mac = &hw->mac;
28515 - struct e1000_mac_operations *func = &mac->ops;
28516 + e1000_mac_operations_no_const *func = &mac->ops;
28517 u32 swsm = 0;
28518 u32 swsm2 = 0;
28519 bool force_clear_smbi = false;
28520 diff -urNp linux-3.0.4/drivers/net/e1000e/es2lan.c linux-3.0.4/drivers/net/e1000e/es2lan.c
28521 --- linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
28522 +++ linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
28523 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28524 {
28525 struct e1000_hw *hw = &adapter->hw;
28526 struct e1000_mac_info *mac = &hw->mac;
28527 - struct e1000_mac_operations *func = &mac->ops;
28528 + e1000_mac_operations_no_const *func = &mac->ops;
28529
28530 /* Set media type */
28531 switch (adapter->pdev->device) {
28532 diff -urNp linux-3.0.4/drivers/net/e1000e/hw.h linux-3.0.4/drivers/net/e1000e/hw.h
28533 --- linux-3.0.4/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
28534 +++ linux-3.0.4/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
28535 @@ -776,6 +776,7 @@ struct e1000_mac_operations {
28536 void (*write_vfta)(struct e1000_hw *, u32, u32);
28537 s32 (*read_mac_addr)(struct e1000_hw *);
28538 };
28539 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28540
28541 /* Function pointers for the PHY. */
28542 struct e1000_phy_operations {
28543 @@ -799,6 +800,7 @@ struct e1000_phy_operations {
28544 void (*power_up)(struct e1000_hw *);
28545 void (*power_down)(struct e1000_hw *);
28546 };
28547 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28548
28549 /* Function pointers for the NVM. */
28550 struct e1000_nvm_operations {
28551 @@ -810,9 +812,10 @@ struct e1000_nvm_operations {
28552 s32 (*validate)(struct e1000_hw *);
28553 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28554 };
28555 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28556
28557 struct e1000_mac_info {
28558 - struct e1000_mac_operations ops;
28559 + e1000_mac_operations_no_const ops;
28560 u8 addr[ETH_ALEN];
28561 u8 perm_addr[ETH_ALEN];
28562
28563 @@ -853,7 +856,7 @@ struct e1000_mac_info {
28564 };
28565
28566 struct e1000_phy_info {
28567 - struct e1000_phy_operations ops;
28568 + e1000_phy_operations_no_const ops;
28569
28570 enum e1000_phy_type type;
28571
28572 @@ -887,7 +890,7 @@ struct e1000_phy_info {
28573 };
28574
28575 struct e1000_nvm_info {
28576 - struct e1000_nvm_operations ops;
28577 + e1000_nvm_operations_no_const ops;
28578
28579 enum e1000_nvm_type type;
28580 enum e1000_nvm_override override;
28581 diff -urNp linux-3.0.4/drivers/net/hamradio/6pack.c linux-3.0.4/drivers/net/hamradio/6pack.c
28582 --- linux-3.0.4/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
28583 +++ linux-3.0.4/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
28584 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28585 unsigned char buf[512];
28586 int count1;
28587
28588 + pax_track_stack();
28589 +
28590 if (!count)
28591 return;
28592
28593 diff -urNp linux-3.0.4/drivers/net/igb/e1000_hw.h linux-3.0.4/drivers/net/igb/e1000_hw.h
28594 --- linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
28595 +++ linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
28596 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
28597 s32 (*read_mac_addr)(struct e1000_hw *);
28598 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28599 };
28600 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28601
28602 struct e1000_phy_operations {
28603 s32 (*acquire)(struct e1000_hw *);
28604 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
28605 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28606 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28607 };
28608 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28609
28610 struct e1000_nvm_operations {
28611 s32 (*acquire)(struct e1000_hw *);
28612 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28613 s32 (*update)(struct e1000_hw *);
28614 s32 (*validate)(struct e1000_hw *);
28615 };
28616 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28617
28618 struct e1000_info {
28619 s32 (*get_invariants)(struct e1000_hw *);
28620 @@ -350,7 +353,7 @@ struct e1000_info {
28621 extern const struct e1000_info e1000_82575_info;
28622
28623 struct e1000_mac_info {
28624 - struct e1000_mac_operations ops;
28625 + e1000_mac_operations_no_const ops;
28626
28627 u8 addr[6];
28628 u8 perm_addr[6];
28629 @@ -388,7 +391,7 @@ struct e1000_mac_info {
28630 };
28631
28632 struct e1000_phy_info {
28633 - struct e1000_phy_operations ops;
28634 + e1000_phy_operations_no_const ops;
28635
28636 enum e1000_phy_type type;
28637
28638 @@ -423,7 +426,7 @@ struct e1000_phy_info {
28639 };
28640
28641 struct e1000_nvm_info {
28642 - struct e1000_nvm_operations ops;
28643 + e1000_nvm_operations_no_const ops;
28644 enum e1000_nvm_type type;
28645 enum e1000_nvm_override override;
28646
28647 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28648 s32 (*check_for_ack)(struct e1000_hw *, u16);
28649 s32 (*check_for_rst)(struct e1000_hw *, u16);
28650 };
28651 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28652
28653 struct e1000_mbx_stats {
28654 u32 msgs_tx;
28655 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28656 };
28657
28658 struct e1000_mbx_info {
28659 - struct e1000_mbx_operations ops;
28660 + e1000_mbx_operations_no_const ops;
28661 struct e1000_mbx_stats stats;
28662 u32 timeout;
28663 u32 usec_delay;
28664 diff -urNp linux-3.0.4/drivers/net/igbvf/vf.h linux-3.0.4/drivers/net/igbvf/vf.h
28665 --- linux-3.0.4/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
28666 +++ linux-3.0.4/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
28667 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
28668 s32 (*read_mac_addr)(struct e1000_hw *);
28669 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28670 };
28671 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28672
28673 struct e1000_mac_info {
28674 - struct e1000_mac_operations ops;
28675 + e1000_mac_operations_no_const ops;
28676 u8 addr[6];
28677 u8 perm_addr[6];
28678
28679 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28680 s32 (*check_for_ack)(struct e1000_hw *);
28681 s32 (*check_for_rst)(struct e1000_hw *);
28682 };
28683 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28684
28685 struct e1000_mbx_stats {
28686 u32 msgs_tx;
28687 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28688 };
28689
28690 struct e1000_mbx_info {
28691 - struct e1000_mbx_operations ops;
28692 + e1000_mbx_operations_no_const ops;
28693 struct e1000_mbx_stats stats;
28694 u32 timeout;
28695 u32 usec_delay;
28696 diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_main.c linux-3.0.4/drivers/net/ixgb/ixgb_main.c
28697 --- linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
28698 +++ linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
28699 @@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
28700 u32 rctl;
28701 int i;
28702
28703 + pax_track_stack();
28704 +
28705 /* Check for Promiscuous and All Multicast modes */
28706
28707 rctl = IXGB_READ_REG(hw, RCTL);
28708 diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_param.c linux-3.0.4/drivers/net/ixgb/ixgb_param.c
28709 --- linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
28710 +++ linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
28711 @@ -261,6 +261,9 @@ void __devinit
28712 ixgb_check_options(struct ixgb_adapter *adapter)
28713 {
28714 int bd = adapter->bd_number;
28715 +
28716 + pax_track_stack();
28717 +
28718 if (bd >= IXGB_MAX_NIC) {
28719 pr_notice("Warning: no configuration for board #%i\n", bd);
28720 pr_notice("Using defaults for all values\n");
28721 diff -urNp linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h
28722 --- linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
28723 +++ linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
28724 @@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
28725 s32 (*update_checksum)(struct ixgbe_hw *);
28726 u16 (*calc_checksum)(struct ixgbe_hw *);
28727 };
28728 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28729
28730 struct ixgbe_mac_operations {
28731 s32 (*init_hw)(struct ixgbe_hw *);
28732 @@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
28733 /* Flow Control */
28734 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28735 };
28736 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28737
28738 struct ixgbe_phy_operations {
28739 s32 (*identify)(struct ixgbe_hw *);
28740 @@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
28741 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28742 s32 (*check_overtemp)(struct ixgbe_hw *);
28743 };
28744 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28745
28746 struct ixgbe_eeprom_info {
28747 - struct ixgbe_eeprom_operations ops;
28748 + ixgbe_eeprom_operations_no_const ops;
28749 enum ixgbe_eeprom_type type;
28750 u32 semaphore_delay;
28751 u16 word_size;
28752 @@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
28753
28754 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28755 struct ixgbe_mac_info {
28756 - struct ixgbe_mac_operations ops;
28757 + ixgbe_mac_operations_no_const ops;
28758 enum ixgbe_mac_type type;
28759 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28760 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28761 @@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
28762 };
28763
28764 struct ixgbe_phy_info {
28765 - struct ixgbe_phy_operations ops;
28766 + ixgbe_phy_operations_no_const ops;
28767 struct mdio_if_info mdio;
28768 enum ixgbe_phy_type type;
28769 u32 id;
28770 @@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
28771 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28772 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28773 };
28774 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28775
28776 struct ixgbe_mbx_stats {
28777 u32 msgs_tx;
28778 @@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
28779 };
28780
28781 struct ixgbe_mbx_info {
28782 - struct ixgbe_mbx_operations ops;
28783 + ixgbe_mbx_operations_no_const ops;
28784 struct ixgbe_mbx_stats stats;
28785 u32 timeout;
28786 u32 usec_delay;
28787 diff -urNp linux-3.0.4/drivers/net/ixgbevf/vf.h linux-3.0.4/drivers/net/ixgbevf/vf.h
28788 --- linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
28789 +++ linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
28790 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
28791 s32 (*clear_vfta)(struct ixgbe_hw *);
28792 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28793 };
28794 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28795
28796 enum ixgbe_mac_type {
28797 ixgbe_mac_unknown = 0,
28798 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
28799 };
28800
28801 struct ixgbe_mac_info {
28802 - struct ixgbe_mac_operations ops;
28803 + ixgbe_mac_operations_no_const ops;
28804 u8 addr[6];
28805 u8 perm_addr[6];
28806
28807 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
28808 s32 (*check_for_ack)(struct ixgbe_hw *);
28809 s32 (*check_for_rst)(struct ixgbe_hw *);
28810 };
28811 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28812
28813 struct ixgbe_mbx_stats {
28814 u32 msgs_tx;
28815 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
28816 };
28817
28818 struct ixgbe_mbx_info {
28819 - struct ixgbe_mbx_operations ops;
28820 + ixgbe_mbx_operations_no_const ops;
28821 struct ixgbe_mbx_stats stats;
28822 u32 timeout;
28823 u32 udelay;
28824 diff -urNp linux-3.0.4/drivers/net/ksz884x.c linux-3.0.4/drivers/net/ksz884x.c
28825 --- linux-3.0.4/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
28826 +++ linux-3.0.4/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
28827 @@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
28828 int rc;
28829 u64 counter[TOTAL_PORT_COUNTER_NUM];
28830
28831 + pax_track_stack();
28832 +
28833 mutex_lock(&hw_priv->lock);
28834 n = SWITCH_PORT_NUM;
28835 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28836 diff -urNp linux-3.0.4/drivers/net/mlx4/main.c linux-3.0.4/drivers/net/mlx4/main.c
28837 --- linux-3.0.4/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
28838 +++ linux-3.0.4/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
28839 @@ -40,6 +40,7 @@
28840 #include <linux/dma-mapping.h>
28841 #include <linux/slab.h>
28842 #include <linux/io-mapping.h>
28843 +#include <linux/sched.h>
28844
28845 #include <linux/mlx4/device.h>
28846 #include <linux/mlx4/doorbell.h>
28847 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28848 u64 icm_size;
28849 int err;
28850
28851 + pax_track_stack();
28852 +
28853 err = mlx4_QUERY_FW(dev);
28854 if (err) {
28855 if (err == -EACCES)
28856 diff -urNp linux-3.0.4/drivers/net/niu.c linux-3.0.4/drivers/net/niu.c
28857 --- linux-3.0.4/drivers/net/niu.c 2011-08-23 21:44:40.000000000 -0400
28858 +++ linux-3.0.4/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
28859 @@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
28860 int i, num_irqs, err;
28861 u8 first_ldg;
28862
28863 + pax_track_stack();
28864 +
28865 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
28866 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
28867 ldg_num_map[i] = first_ldg + i;
28868 diff -urNp linux-3.0.4/drivers/net/pcnet32.c linux-3.0.4/drivers/net/pcnet32.c
28869 --- linux-3.0.4/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
28870 +++ linux-3.0.4/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
28871 @@ -82,7 +82,7 @@ static int cards_found;
28872 /*
28873 * VLB I/O addresses
28874 */
28875 -static unsigned int pcnet32_portlist[] __initdata =
28876 +static unsigned int pcnet32_portlist[] __devinitdata =
28877 { 0x300, 0x320, 0x340, 0x360, 0 };
28878
28879 static int pcnet32_debug;
28880 @@ -270,7 +270,7 @@ struct pcnet32_private {
28881 struct sk_buff **rx_skbuff;
28882 dma_addr_t *tx_dma_addr;
28883 dma_addr_t *rx_dma_addr;
28884 - struct pcnet32_access a;
28885 + struct pcnet32_access *a;
28886 spinlock_t lock; /* Guard lock */
28887 unsigned int cur_rx, cur_tx; /* The next free ring entry */
28888 unsigned int rx_ring_size; /* current rx ring size */
28889 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
28890 u16 val;
28891
28892 netif_wake_queue(dev);
28893 - val = lp->a.read_csr(ioaddr, CSR3);
28894 + val = lp->a->read_csr(ioaddr, CSR3);
28895 val &= 0x00ff;
28896 - lp->a.write_csr(ioaddr, CSR3, val);
28897 + lp->a->write_csr(ioaddr, CSR3, val);
28898 napi_enable(&lp->napi);
28899 }
28900
28901 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
28902 r = mii_link_ok(&lp->mii_if);
28903 } else if (lp->chip_version >= PCNET32_79C970A) {
28904 ulong ioaddr = dev->base_addr; /* card base I/O address */
28905 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
28906 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
28907 } else { /* can not detect link on really old chips */
28908 r = 1;
28909 }
28910 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
28911 pcnet32_netif_stop(dev);
28912
28913 spin_lock_irqsave(&lp->lock, flags);
28914 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28915 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28916
28917 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
28918
28919 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
28920 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
28921 {
28922 struct pcnet32_private *lp = netdev_priv(dev);
28923 - struct pcnet32_access *a = &lp->a; /* access to registers */
28924 + struct pcnet32_access *a = lp->a; /* access to registers */
28925 ulong ioaddr = dev->base_addr; /* card base I/O address */
28926 struct sk_buff *skb; /* sk buff */
28927 int x, i; /* counters */
28928 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
28929 pcnet32_netif_stop(dev);
28930
28931 spin_lock_irqsave(&lp->lock, flags);
28932 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28933 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28934
28935 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
28936
28937 /* Reset the PCNET32 */
28938 - lp->a.reset(ioaddr);
28939 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28940 + lp->a->reset(ioaddr);
28941 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28942
28943 /* switch pcnet32 to 32bit mode */
28944 - lp->a.write_bcr(ioaddr, 20, 2);
28945 + lp->a->write_bcr(ioaddr, 20, 2);
28946
28947 /* purge & init rings but don't actually restart */
28948 pcnet32_restart(dev, 0x0000);
28949
28950 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28951 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28952
28953 /* Initialize Transmit buffers. */
28954 size = data_len + 15;
28955 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
28956
28957 /* set int loopback in CSR15 */
28958 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
28959 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
28960 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
28961
28962 teststatus = cpu_to_le16(0x8000);
28963 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28964 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28965
28966 /* Check status of descriptors */
28967 for (x = 0; x < numbuffs; x++) {
28968 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
28969 }
28970 }
28971
28972 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28973 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28974 wmb();
28975 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
28976 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
28977 @@ -1015,7 +1015,7 @@ clean_up:
28978 pcnet32_restart(dev, CSR0_NORMAL);
28979 } else {
28980 pcnet32_purge_rx_ring(dev);
28981 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28982 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28983 }
28984 spin_unlock_irqrestore(&lp->lock, flags);
28985
28986 @@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
28987 enum ethtool_phys_id_state state)
28988 {
28989 struct pcnet32_private *lp = netdev_priv(dev);
28990 - struct pcnet32_access *a = &lp->a;
28991 + struct pcnet32_access *a = lp->a;
28992 ulong ioaddr = dev->base_addr;
28993 unsigned long flags;
28994 int i;
28995 @@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
28996 {
28997 int csr5;
28998 struct pcnet32_private *lp = netdev_priv(dev);
28999 - struct pcnet32_access *a = &lp->a;
29000 + struct pcnet32_access *a = lp->a;
29001 ulong ioaddr = dev->base_addr;
29002 int ticks;
29003
29004 @@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
29005 spin_lock_irqsave(&lp->lock, flags);
29006 if (pcnet32_tx(dev)) {
29007 /* reset the chip to clear the error condition, then restart */
29008 - lp->a.reset(ioaddr);
29009 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29010 + lp->a->reset(ioaddr);
29011 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29012 pcnet32_restart(dev, CSR0_START);
29013 netif_wake_queue(dev);
29014 }
29015 @@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
29016 __napi_complete(napi);
29017
29018 /* clear interrupt masks */
29019 - val = lp->a.read_csr(ioaddr, CSR3);
29020 + val = lp->a->read_csr(ioaddr, CSR3);
29021 val &= 0x00ff;
29022 - lp->a.write_csr(ioaddr, CSR3, val);
29023 + lp->a->write_csr(ioaddr, CSR3, val);
29024
29025 /* Set interrupt enable. */
29026 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29027 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29028
29029 spin_unlock_irqrestore(&lp->lock, flags);
29030 }
29031 @@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
29032 int i, csr0;
29033 u16 *buff = ptr;
29034 struct pcnet32_private *lp = netdev_priv(dev);
29035 - struct pcnet32_access *a = &lp->a;
29036 + struct pcnet32_access *a = lp->a;
29037 ulong ioaddr = dev->base_addr;
29038 unsigned long flags;
29039
29040 @@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
29041 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29042 if (lp->phymask & (1 << j)) {
29043 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29044 - lp->a.write_bcr(ioaddr, 33,
29045 + lp->a->write_bcr(ioaddr, 33,
29046 (j << 5) | i);
29047 - *buff++ = lp->a.read_bcr(ioaddr, 34);
29048 + *buff++ = lp->a->read_bcr(ioaddr, 34);
29049 }
29050 }
29051 }
29052 @@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29053 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29054 lp->options |= PCNET32_PORT_FD;
29055
29056 - lp->a = *a;
29057 + lp->a = a;
29058
29059 /* prior to register_netdev, dev->name is not yet correct */
29060 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29061 @@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29062 if (lp->mii) {
29063 /* lp->phycount and lp->phymask are set to 0 by memset above */
29064
29065 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29066 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29067 /* scan for PHYs */
29068 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29069 unsigned short id1, id2;
29070 @@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29071 pr_info("Found PHY %04x:%04x at address %d\n",
29072 id1, id2, i);
29073 }
29074 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29075 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29076 if (lp->phycount > 1)
29077 lp->options |= PCNET32_PORT_MII;
29078 }
29079 @@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
29080 }
29081
29082 /* Reset the PCNET32 */
29083 - lp->a.reset(ioaddr);
29084 + lp->a->reset(ioaddr);
29085
29086 /* switch pcnet32 to 32bit mode */
29087 - lp->a.write_bcr(ioaddr, 20, 2);
29088 + lp->a->write_bcr(ioaddr, 20, 2);
29089
29090 netif_printk(lp, ifup, KERN_DEBUG, dev,
29091 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29092 @@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
29093 (u32) (lp->init_dma_addr));
29094
29095 /* set/reset autoselect bit */
29096 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
29097 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
29098 if (lp->options & PCNET32_PORT_ASEL)
29099 val |= 2;
29100 - lp->a.write_bcr(ioaddr, 2, val);
29101 + lp->a->write_bcr(ioaddr, 2, val);
29102
29103 /* handle full duplex setting */
29104 if (lp->mii_if.full_duplex) {
29105 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
29106 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
29107 if (lp->options & PCNET32_PORT_FD) {
29108 val |= 1;
29109 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29110 @@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
29111 if (lp->chip_version == 0x2627)
29112 val |= 3;
29113 }
29114 - lp->a.write_bcr(ioaddr, 9, val);
29115 + lp->a->write_bcr(ioaddr, 9, val);
29116 }
29117
29118 /* set/reset GPSI bit in test register */
29119 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29120 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29121 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29122 val |= 0x10;
29123 - lp->a.write_csr(ioaddr, 124, val);
29124 + lp->a->write_csr(ioaddr, 124, val);
29125
29126 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29127 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29128 @@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
29129 * duplex, and/or enable auto negotiation, and clear DANAS
29130 */
29131 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29132 - lp->a.write_bcr(ioaddr, 32,
29133 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
29134 + lp->a->write_bcr(ioaddr, 32,
29135 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
29136 /* disable Auto Negotiation, set 10Mpbs, HD */
29137 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29138 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29139 if (lp->options & PCNET32_PORT_FD)
29140 val |= 0x10;
29141 if (lp->options & PCNET32_PORT_100)
29142 val |= 0x08;
29143 - lp->a.write_bcr(ioaddr, 32, val);
29144 + lp->a->write_bcr(ioaddr, 32, val);
29145 } else {
29146 if (lp->options & PCNET32_PORT_ASEL) {
29147 - lp->a.write_bcr(ioaddr, 32,
29148 - lp->a.read_bcr(ioaddr,
29149 + lp->a->write_bcr(ioaddr, 32,
29150 + lp->a->read_bcr(ioaddr,
29151 32) | 0x0080);
29152 /* enable auto negotiate, setup, disable fd */
29153 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29154 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29155 val |= 0x20;
29156 - lp->a.write_bcr(ioaddr, 32, val);
29157 + lp->a->write_bcr(ioaddr, 32, val);
29158 }
29159 }
29160 } else {
29161 @@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
29162 * There is really no good other way to handle multiple PHYs
29163 * other than turning off all automatics
29164 */
29165 - val = lp->a.read_bcr(ioaddr, 2);
29166 - lp->a.write_bcr(ioaddr, 2, val & ~2);
29167 - val = lp->a.read_bcr(ioaddr, 32);
29168 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29169 + val = lp->a->read_bcr(ioaddr, 2);
29170 + lp->a->write_bcr(ioaddr, 2, val & ~2);
29171 + val = lp->a->read_bcr(ioaddr, 32);
29172 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29173
29174 if (!(lp->options & PCNET32_PORT_ASEL)) {
29175 /* setup ecmd */
29176 @@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
29177 ethtool_cmd_speed_set(&ecmd,
29178 (lp->options & PCNET32_PORT_100) ?
29179 SPEED_100 : SPEED_10);
29180 - bcr9 = lp->a.read_bcr(ioaddr, 9);
29181 + bcr9 = lp->a->read_bcr(ioaddr, 9);
29182
29183 if (lp->options & PCNET32_PORT_FD) {
29184 ecmd.duplex = DUPLEX_FULL;
29185 @@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
29186 ecmd.duplex = DUPLEX_HALF;
29187 bcr9 |= ~(1 << 0);
29188 }
29189 - lp->a.write_bcr(ioaddr, 9, bcr9);
29190 + lp->a->write_bcr(ioaddr, 9, bcr9);
29191 }
29192
29193 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29194 @@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
29195
29196 #ifdef DO_DXSUFLO
29197 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29198 - val = lp->a.read_csr(ioaddr, CSR3);
29199 + val = lp->a->read_csr(ioaddr, CSR3);
29200 val |= 0x40;
29201 - lp->a.write_csr(ioaddr, CSR3, val);
29202 + lp->a->write_csr(ioaddr, CSR3, val);
29203 }
29204 #endif
29205
29206 @@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
29207 napi_enable(&lp->napi);
29208
29209 /* Re-initialize the PCNET32, and start it when done. */
29210 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29211 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29212 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29213 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29214
29215 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29216 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29217 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29218 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29219
29220 netif_start_queue(dev);
29221
29222 @@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
29223
29224 i = 0;
29225 while (i++ < 100)
29226 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29227 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29228 break;
29229 /*
29230 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29231 * reports that doing so triggers a bug in the '974.
29232 */
29233 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29234 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29235
29236 netif_printk(lp, ifup, KERN_DEBUG, dev,
29237 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29238 i,
29239 (u32) (lp->init_dma_addr),
29240 - lp->a.read_csr(ioaddr, CSR0));
29241 + lp->a->read_csr(ioaddr, CSR0));
29242
29243 spin_unlock_irqrestore(&lp->lock, flags);
29244
29245 @@ -2218,7 +2218,7 @@ err_free_ring:
29246 * Switch back to 16bit mode to avoid problems with dumb
29247 * DOS packet driver after a warm reboot
29248 */
29249 - lp->a.write_bcr(ioaddr, 20, 4);
29250 + lp->a->write_bcr(ioaddr, 20, 4);
29251
29252 err_free_irq:
29253 spin_unlock_irqrestore(&lp->lock, flags);
29254 @@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
29255
29256 /* wait for stop */
29257 for (i = 0; i < 100; i++)
29258 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29259 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29260 break;
29261
29262 if (i >= 100)
29263 @@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
29264 return;
29265
29266 /* ReInit Ring */
29267 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29268 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29269 i = 0;
29270 while (i++ < 1000)
29271 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29272 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29273 break;
29274
29275 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29276 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29277 }
29278
29279 static void pcnet32_tx_timeout(struct net_device *dev)
29280 @@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
29281 /* Transmitter timeout, serious problems. */
29282 if (pcnet32_debug & NETIF_MSG_DRV)
29283 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29284 - dev->name, lp->a.read_csr(ioaddr, CSR0));
29285 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29286 + dev->name, lp->a->read_csr(ioaddr, CSR0));
29287 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29288 dev->stats.tx_errors++;
29289 if (netif_msg_tx_err(lp)) {
29290 int i;
29291 @@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29292
29293 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29294 "%s() called, csr0 %4.4x\n",
29295 - __func__, lp->a.read_csr(ioaddr, CSR0));
29296 + __func__, lp->a->read_csr(ioaddr, CSR0));
29297
29298 /* Default status -- will not enable Successful-TxDone
29299 * interrupt when that option is available to us.
29300 @@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29301 dev->stats.tx_bytes += skb->len;
29302
29303 /* Trigger an immediate send poll. */
29304 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29305 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29306
29307 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29308 lp->tx_full = 1;
29309 @@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29310
29311 spin_lock(&lp->lock);
29312
29313 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29314 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29315 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29316 if (csr0 == 0xffff)
29317 break; /* PCMCIA remove happened */
29318 /* Acknowledge all of the current interrupt sources ASAP. */
29319 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29320 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29321
29322 netif_printk(lp, intr, KERN_DEBUG, dev,
29323 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29324 - csr0, lp->a.read_csr(ioaddr, CSR0));
29325 + csr0, lp->a->read_csr(ioaddr, CSR0));
29326
29327 /* Log misc errors. */
29328 if (csr0 & 0x4000)
29329 @@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29330 if (napi_schedule_prep(&lp->napi)) {
29331 u16 val;
29332 /* set interrupt masks */
29333 - val = lp->a.read_csr(ioaddr, CSR3);
29334 + val = lp->a->read_csr(ioaddr, CSR3);
29335 val |= 0x5f00;
29336 - lp->a.write_csr(ioaddr, CSR3, val);
29337 + lp->a->write_csr(ioaddr, CSR3, val);
29338
29339 __napi_schedule(&lp->napi);
29340 break;
29341 }
29342 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29343 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29344 }
29345
29346 netif_printk(lp, intr, KERN_DEBUG, dev,
29347 "exiting interrupt, csr0=%#4.4x\n",
29348 - lp->a.read_csr(ioaddr, CSR0));
29349 + lp->a->read_csr(ioaddr, CSR0));
29350
29351 spin_unlock(&lp->lock);
29352
29353 @@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
29354
29355 spin_lock_irqsave(&lp->lock, flags);
29356
29357 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29358 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29359
29360 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29361 "Shutting down ethercard, status was %2.2x\n",
29362 - lp->a.read_csr(ioaddr, CSR0));
29363 + lp->a->read_csr(ioaddr, CSR0));
29364
29365 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29366 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29367 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29368
29369 /*
29370 * Switch back to 16bit mode to avoid problems with dumb
29371 * DOS packet driver after a warm reboot
29372 */
29373 - lp->a.write_bcr(ioaddr, 20, 4);
29374 + lp->a->write_bcr(ioaddr, 20, 4);
29375
29376 spin_unlock_irqrestore(&lp->lock, flags);
29377
29378 @@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
29379 unsigned long flags;
29380
29381 spin_lock_irqsave(&lp->lock, flags);
29382 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29383 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29384 spin_unlock_irqrestore(&lp->lock, flags);
29385
29386 return &dev->stats;
29387 @@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
29388 if (dev->flags & IFF_ALLMULTI) {
29389 ib->filter[0] = cpu_to_le32(~0U);
29390 ib->filter[1] = cpu_to_le32(~0U);
29391 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29392 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29393 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29394 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29395 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29396 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29397 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29398 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29399 return;
29400 }
29401 /* clear the multicast filter */
29402 @@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
29403 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29404 }
29405 for (i = 0; i < 4; i++)
29406 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29407 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29408 le16_to_cpu(mcast_table[i]));
29409 }
29410
29411 @@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
29412
29413 spin_lock_irqsave(&lp->lock, flags);
29414 suspended = pcnet32_suspend(dev, &flags, 0);
29415 - csr15 = lp->a.read_csr(ioaddr, CSR15);
29416 + csr15 = lp->a->read_csr(ioaddr, CSR15);
29417 if (dev->flags & IFF_PROMISC) {
29418 /* Log any net taps. */
29419 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29420 lp->init_block->mode =
29421 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29422 7);
29423 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29424 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29425 } else {
29426 lp->init_block->mode =
29427 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29428 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29429 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29430 pcnet32_load_multicast(dev);
29431 }
29432
29433 if (suspended) {
29434 int csr5;
29435 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29436 - csr5 = lp->a.read_csr(ioaddr, CSR5);
29437 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29438 + csr5 = lp->a->read_csr(ioaddr, CSR5);
29439 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29440 } else {
29441 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29442 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29443 pcnet32_restart(dev, CSR0_NORMAL);
29444 netif_wake_queue(dev);
29445 }
29446 @@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
29447 if (!lp->mii)
29448 return 0;
29449
29450 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29451 - val_out = lp->a.read_bcr(ioaddr, 34);
29452 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29453 + val_out = lp->a->read_bcr(ioaddr, 34);
29454
29455 return val_out;
29456 }
29457 @@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
29458 if (!lp->mii)
29459 return;
29460
29461 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29462 - lp->a.write_bcr(ioaddr, 34, val);
29463 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29464 + lp->a->write_bcr(ioaddr, 34, val);
29465 }
29466
29467 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29468 @@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
29469 curr_link = mii_link_ok(&lp->mii_if);
29470 } else {
29471 ulong ioaddr = dev->base_addr; /* card base I/O address */
29472 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29473 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29474 }
29475 if (!curr_link) {
29476 if (prev_link || verbose) {
29477 @@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
29478 (ecmd.duplex == DUPLEX_FULL)
29479 ? "full" : "half");
29480 }
29481 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29482 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29483 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29484 if (lp->mii_if.full_duplex)
29485 bcr9 |= (1 << 0);
29486 else
29487 bcr9 &= ~(1 << 0);
29488 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
29489 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
29490 }
29491 } else {
29492 netif_info(lp, link, dev, "link up\n");
29493 diff -urNp linux-3.0.4/drivers/net/ppp_generic.c linux-3.0.4/drivers/net/ppp_generic.c
29494 --- linux-3.0.4/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
29495 +++ linux-3.0.4/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
29496 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29497 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29498 struct ppp_stats stats;
29499 struct ppp_comp_stats cstats;
29500 - char *vers;
29501
29502 switch (cmd) {
29503 case SIOCGPPPSTATS:
29504 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29505 break;
29506
29507 case SIOCGPPPVER:
29508 - vers = PPP_VERSION;
29509 - if (copy_to_user(addr, vers, strlen(vers) + 1))
29510 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29511 break;
29512 err = 0;
29513 break;
29514 diff -urNp linux-3.0.4/drivers/net/r8169.c linux-3.0.4/drivers/net/r8169.c
29515 --- linux-3.0.4/drivers/net/r8169.c 2011-08-23 21:44:40.000000000 -0400
29516 +++ linux-3.0.4/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
29517 @@ -645,12 +645,12 @@ struct rtl8169_private {
29518 struct mdio_ops {
29519 void (*write)(void __iomem *, int, int);
29520 int (*read)(void __iomem *, int);
29521 - } mdio_ops;
29522 + } __no_const mdio_ops;
29523
29524 struct pll_power_ops {
29525 void (*down)(struct rtl8169_private *);
29526 void (*up)(struct rtl8169_private *);
29527 - } pll_power_ops;
29528 + } __no_const pll_power_ops;
29529
29530 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29531 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29532 diff -urNp linux-3.0.4/drivers/net/tg3.h linux-3.0.4/drivers/net/tg3.h
29533 --- linux-3.0.4/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
29534 +++ linux-3.0.4/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
29535 @@ -134,6 +134,7 @@
29536 #define CHIPREV_ID_5750_A0 0x4000
29537 #define CHIPREV_ID_5750_A1 0x4001
29538 #define CHIPREV_ID_5750_A3 0x4003
29539 +#define CHIPREV_ID_5750_C1 0x4201
29540 #define CHIPREV_ID_5750_C2 0x4202
29541 #define CHIPREV_ID_5752_A0_HW 0x5000
29542 #define CHIPREV_ID_5752_A0 0x6000
29543 diff -urNp linux-3.0.4/drivers/net/tokenring/abyss.c linux-3.0.4/drivers/net/tokenring/abyss.c
29544 --- linux-3.0.4/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
29545 +++ linux-3.0.4/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
29546 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29547
29548 static int __init abyss_init (void)
29549 {
29550 - abyss_netdev_ops = tms380tr_netdev_ops;
29551 + pax_open_kernel();
29552 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29553
29554 - abyss_netdev_ops.ndo_open = abyss_open;
29555 - abyss_netdev_ops.ndo_stop = abyss_close;
29556 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29557 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29558 + pax_close_kernel();
29559
29560 return pci_register_driver(&abyss_driver);
29561 }
29562 diff -urNp linux-3.0.4/drivers/net/tokenring/madgemc.c linux-3.0.4/drivers/net/tokenring/madgemc.c
29563 --- linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
29564 +++ linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
29565 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29566
29567 static int __init madgemc_init (void)
29568 {
29569 - madgemc_netdev_ops = tms380tr_netdev_ops;
29570 - madgemc_netdev_ops.ndo_open = madgemc_open;
29571 - madgemc_netdev_ops.ndo_stop = madgemc_close;
29572 + pax_open_kernel();
29573 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29574 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29575 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29576 + pax_close_kernel();
29577
29578 return mca_register_driver (&madgemc_driver);
29579 }
29580 diff -urNp linux-3.0.4/drivers/net/tokenring/proteon.c linux-3.0.4/drivers/net/tokenring/proteon.c
29581 --- linux-3.0.4/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
29582 +++ linux-3.0.4/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
29583 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
29584 struct platform_device *pdev;
29585 int i, num = 0, err = 0;
29586
29587 - proteon_netdev_ops = tms380tr_netdev_ops;
29588 - proteon_netdev_ops.ndo_open = proteon_open;
29589 - proteon_netdev_ops.ndo_stop = tms380tr_close;
29590 + pax_open_kernel();
29591 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29592 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29593 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29594 + pax_close_kernel();
29595
29596 err = platform_driver_register(&proteon_driver);
29597 if (err)
29598 diff -urNp linux-3.0.4/drivers/net/tokenring/skisa.c linux-3.0.4/drivers/net/tokenring/skisa.c
29599 --- linux-3.0.4/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
29600 +++ linux-3.0.4/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
29601 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29602 struct platform_device *pdev;
29603 int i, num = 0, err = 0;
29604
29605 - sk_isa_netdev_ops = tms380tr_netdev_ops;
29606 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
29607 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29608 + pax_open_kernel();
29609 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29610 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29611 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29612 + pax_close_kernel();
29613
29614 err = platform_driver_register(&sk_isa_driver);
29615 if (err)
29616 diff -urNp linux-3.0.4/drivers/net/tulip/de2104x.c linux-3.0.4/drivers/net/tulip/de2104x.c
29617 --- linux-3.0.4/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
29618 +++ linux-3.0.4/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
29619 @@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
29620 struct de_srom_info_leaf *il;
29621 void *bufp;
29622
29623 + pax_track_stack();
29624 +
29625 /* download entire eeprom */
29626 for (i = 0; i < DE_EEPROM_WORDS; i++)
29627 ((__le16 *)ee_data)[i] =
29628 diff -urNp linux-3.0.4/drivers/net/tulip/de4x5.c linux-3.0.4/drivers/net/tulip/de4x5.c
29629 --- linux-3.0.4/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
29630 +++ linux-3.0.4/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
29631 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29632 for (i=0; i<ETH_ALEN; i++) {
29633 tmp.addr[i] = dev->dev_addr[i];
29634 }
29635 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29636 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29637 break;
29638
29639 case DE4X5_SET_HWADDR: /* Set the hardware address */
29640 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29641 spin_lock_irqsave(&lp->lock, flags);
29642 memcpy(&statbuf, &lp->pktStats, ioc->len);
29643 spin_unlock_irqrestore(&lp->lock, flags);
29644 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
29645 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29646 return -EFAULT;
29647 break;
29648 }
29649 diff -urNp linux-3.0.4/drivers/net/usb/hso.c linux-3.0.4/drivers/net/usb/hso.c
29650 --- linux-3.0.4/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
29651 +++ linux-3.0.4/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
29652 @@ -71,7 +71,7 @@
29653 #include <asm/byteorder.h>
29654 #include <linux/serial_core.h>
29655 #include <linux/serial.h>
29656 -
29657 +#include <asm/local.h>
29658
29659 #define MOD_AUTHOR "Option Wireless"
29660 #define MOD_DESCRIPTION "USB High Speed Option driver"
29661 @@ -257,7 +257,7 @@ struct hso_serial {
29662
29663 /* from usb_serial_port */
29664 struct tty_struct *tty;
29665 - int open_count;
29666 + local_t open_count;
29667 spinlock_t serial_lock;
29668
29669 int (*write_data) (struct hso_serial *serial);
29670 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29671 struct urb *urb;
29672
29673 urb = serial->rx_urb[0];
29674 - if (serial->open_count > 0) {
29675 + if (local_read(&serial->open_count) > 0) {
29676 count = put_rxbuf_data(urb, serial);
29677 if (count == -1)
29678 return;
29679 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29680 DUMP1(urb->transfer_buffer, urb->actual_length);
29681
29682 /* Anyone listening? */
29683 - if (serial->open_count == 0)
29684 + if (local_read(&serial->open_count) == 0)
29685 return;
29686
29687 if (status == 0) {
29688 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29689 spin_unlock_irq(&serial->serial_lock);
29690
29691 /* check for port already opened, if not set the termios */
29692 - serial->open_count++;
29693 - if (serial->open_count == 1) {
29694 + if (local_inc_return(&serial->open_count) == 1) {
29695 serial->rx_state = RX_IDLE;
29696 /* Force default termio settings */
29697 _hso_serial_set_termios(tty, NULL);
29698 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29699 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29700 if (result) {
29701 hso_stop_serial_device(serial->parent);
29702 - serial->open_count--;
29703 + local_dec(&serial->open_count);
29704 kref_put(&serial->parent->ref, hso_serial_ref_free);
29705 }
29706 } else {
29707 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29708
29709 /* reset the rts and dtr */
29710 /* do the actual close */
29711 - serial->open_count--;
29712 + local_dec(&serial->open_count);
29713
29714 - if (serial->open_count <= 0) {
29715 - serial->open_count = 0;
29716 + if (local_read(&serial->open_count) <= 0) {
29717 + local_set(&serial->open_count, 0);
29718 spin_lock_irq(&serial->serial_lock);
29719 if (serial->tty == tty) {
29720 serial->tty->driver_data = NULL;
29721 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29722
29723 /* the actual setup */
29724 spin_lock_irqsave(&serial->serial_lock, flags);
29725 - if (serial->open_count)
29726 + if (local_read(&serial->open_count))
29727 _hso_serial_set_termios(tty, old);
29728 else
29729 tty->termios = old;
29730 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29731 D1("Pending read interrupt on port %d\n", i);
29732 spin_lock(&serial->serial_lock);
29733 if (serial->rx_state == RX_IDLE &&
29734 - serial->open_count > 0) {
29735 + local_read(&serial->open_count) > 0) {
29736 /* Setup and send a ctrl req read on
29737 * port i */
29738 if (!serial->rx_urb_filled[0]) {
29739 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
29740 /* Start all serial ports */
29741 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29742 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29743 - if (dev2ser(serial_table[i])->open_count) {
29744 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
29745 result =
29746 hso_start_serial_device(serial_table[i], GFP_NOIO);
29747 hso_kick_transmit(dev2ser(serial_table[i]));
29748 diff -urNp linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
29749 --- linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
29750 +++ linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
29751 @@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
29752 * Return with error code if any of the queue indices
29753 * is out of range
29754 */
29755 - if (p->ring_index[i] < 0 ||
29756 - p->ring_index[i] >= adapter->num_rx_queues)
29757 + if (p->ring_index[i] >= adapter->num_rx_queues)
29758 return -EINVAL;
29759 }
29760
29761 diff -urNp linux-3.0.4/drivers/net/vxge/vxge-config.h linux-3.0.4/drivers/net/vxge/vxge-config.h
29762 --- linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
29763 +++ linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
29764 @@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
29765 void (*link_down)(struct __vxge_hw_device *devh);
29766 void (*crit_err)(struct __vxge_hw_device *devh,
29767 enum vxge_hw_event type, u64 ext_data);
29768 -};
29769 +} __no_const;
29770
29771 /*
29772 * struct __vxge_hw_blockpool_entry - Block private data structure
29773 diff -urNp linux-3.0.4/drivers/net/vxge/vxge-main.c linux-3.0.4/drivers/net/vxge/vxge-main.c
29774 --- linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
29775 +++ linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
29776 @@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29777 struct sk_buff *completed[NR_SKB_COMPLETED];
29778 int more;
29779
29780 + pax_track_stack();
29781 +
29782 do {
29783 more = 0;
29784 skb_ptr = completed;
29785 @@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
29786 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29787 int index;
29788
29789 + pax_track_stack();
29790 +
29791 /*
29792 * Filling
29793 * - itable with bucket numbers
29794 diff -urNp linux-3.0.4/drivers/net/vxge/vxge-traffic.h linux-3.0.4/drivers/net/vxge/vxge-traffic.h
29795 --- linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
29796 +++ linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
29797 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29798 struct vxge_hw_mempool_dma *dma_object,
29799 u32 index,
29800 u32 is_last);
29801 -};
29802 +} __no_const;
29803
29804 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29805 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29806 diff -urNp linux-3.0.4/drivers/net/wan/cycx_x25.c linux-3.0.4/drivers/net/wan/cycx_x25.c
29807 --- linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
29808 +++ linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
29809 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29810 unsigned char hex[1024],
29811 * phex = hex;
29812
29813 + pax_track_stack();
29814 +
29815 if (len >= (sizeof(hex) / 2))
29816 len = (sizeof(hex) / 2) - 1;
29817
29818 diff -urNp linux-3.0.4/drivers/net/wan/hdlc_x25.c linux-3.0.4/drivers/net/wan/hdlc_x25.c
29819 --- linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
29820 +++ linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
29821 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29822
29823 static int x25_open(struct net_device *dev)
29824 {
29825 - struct lapb_register_struct cb;
29826 + static struct lapb_register_struct cb = {
29827 + .connect_confirmation = x25_connected,
29828 + .connect_indication = x25_connected,
29829 + .disconnect_confirmation = x25_disconnected,
29830 + .disconnect_indication = x25_disconnected,
29831 + .data_indication = x25_data_indication,
29832 + .data_transmit = x25_data_transmit
29833 + };
29834 int result;
29835
29836 - cb.connect_confirmation = x25_connected;
29837 - cb.connect_indication = x25_connected;
29838 - cb.disconnect_confirmation = x25_disconnected;
29839 - cb.disconnect_indication = x25_disconnected;
29840 - cb.data_indication = x25_data_indication;
29841 - cb.data_transmit = x25_data_transmit;
29842 -
29843 result = lapb_register(dev, &cb);
29844 if (result != LAPB_OK)
29845 return result;
29846 diff -urNp linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c
29847 --- linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
29848 +++ linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
29849 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29850 int do_autopm = 1;
29851 DECLARE_COMPLETION_ONSTACK(notif_completion);
29852
29853 + pax_track_stack();
29854 +
29855 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
29856 i2400m, ack, ack_size);
29857 BUG_ON(_ack == i2400m->bm_ack_buf);
29858 diff -urNp linux-3.0.4/drivers/net/wireless/airo.c linux-3.0.4/drivers/net/wireless/airo.c
29859 --- linux-3.0.4/drivers/net/wireless/airo.c 2011-08-23 21:44:40.000000000 -0400
29860 +++ linux-3.0.4/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
29861 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
29862 BSSListElement * loop_net;
29863 BSSListElement * tmp_net;
29864
29865 + pax_track_stack();
29866 +
29867 /* Blow away current list of scan results */
29868 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
29869 list_move_tail (&loop_net->list, &ai->network_free_list);
29870 @@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
29871 WepKeyRid wkr;
29872 int rc;
29873
29874 + pax_track_stack();
29875 +
29876 memset( &mySsid, 0, sizeof( mySsid ) );
29877 kfree (ai->flash);
29878 ai->flash = NULL;
29879 @@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
29880 __le32 *vals = stats.vals;
29881 int len;
29882
29883 + pax_track_stack();
29884 +
29885 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29886 return -ENOMEM;
29887 data = file->private_data;
29888 @@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
29889 /* If doLoseSync is not 1, we won't do a Lose Sync */
29890 int doLoseSync = -1;
29891
29892 + pax_track_stack();
29893 +
29894 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29895 return -ENOMEM;
29896 data = file->private_data;
29897 @@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
29898 int i;
29899 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
29900
29901 + pax_track_stack();
29902 +
29903 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
29904 if (!qual)
29905 return -ENOMEM;
29906 @@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
29907 CapabilityRid cap_rid;
29908 __le32 *vals = stats_rid.vals;
29909
29910 + pax_track_stack();
29911 +
29912 /* Get stats out of the card */
29913 clear_bit(JOB_WSTATS, &local->jobs);
29914 if (local->power.event) {
29915 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c
29916 --- linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
29917 +++ linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
29918 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
29919 unsigned int v;
29920 u64 tsf;
29921
29922 + pax_track_stack();
29923 +
29924 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
29925 len += snprintf(buf+len, sizeof(buf)-len,
29926 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
29927 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
29928 unsigned int len = 0;
29929 unsigned int i;
29930
29931 + pax_track_stack();
29932 +
29933 len += snprintf(buf+len, sizeof(buf)-len,
29934 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
29935
29936 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
29937 unsigned int i;
29938 unsigned int v;
29939
29940 + pax_track_stack();
29941 +
29942 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
29943 sc->ah->ah_ant_mode);
29944 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
29945 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
29946 unsigned int len = 0;
29947 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
29948
29949 + pax_track_stack();
29950 +
29951 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
29952 sc->bssidmask);
29953 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
29954 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
29955 unsigned int len = 0;
29956 int i;
29957
29958 + pax_track_stack();
29959 +
29960 len += snprintf(buf+len, sizeof(buf)-len,
29961 "RX\n---------------------\n");
29962 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
29963 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
29964 char buf[700];
29965 unsigned int len = 0;
29966
29967 + pax_track_stack();
29968 +
29969 len += snprintf(buf+len, sizeof(buf)-len,
29970 "HW has PHY error counters:\t%s\n",
29971 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
29972 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
29973 struct ath5k_buf *bf, *bf0;
29974 int i, n;
29975
29976 + pax_track_stack();
29977 +
29978 len += snprintf(buf+len, sizeof(buf)-len,
29979 "available txbuffers: %d\n", sc->txbuf_len);
29980
29981 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
29982 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
29983 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
29984 @@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
29985 int i, im, j;
29986 int nmeasurement;
29987
29988 + pax_track_stack();
29989 +
29990 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
29991 if (ah->txchainmask & (1 << i))
29992 num_chains++;
29993 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
29994 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
29995 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
29996 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
29997 int theta_low_bin = 0;
29998 int i;
29999
30000 + pax_track_stack();
30001 +
30002 /* disregard any bin that contains <= 16 samples */
30003 thresh_accum_cnt = 16;
30004 scale_factor = 5;
30005 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c
30006 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
30007 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
30008 @@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
30009 char buf[512];
30010 unsigned int len = 0;
30011
30012 + pax_track_stack();
30013 +
30014 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30015 len += snprintf(buf + len, sizeof(buf) - len,
30016 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30017 @@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
30018 u8 addr[ETH_ALEN];
30019 u32 tmp;
30020
30021 + pax_track_stack();
30022 +
30023 len += snprintf(buf + len, sizeof(buf) - len,
30024 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30025 wiphy_name(sc->hw->wiphy),
30026 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
30027 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
30028 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
30029 @@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
30030 unsigned int len = 0;
30031 int ret = 0;
30032
30033 + pax_track_stack();
30034 +
30035 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30036
30037 ath9k_htc_ps_wakeup(priv);
30038 @@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
30039 unsigned int len = 0;
30040 int ret = 0;
30041
30042 + pax_track_stack();
30043 +
30044 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30045
30046 ath9k_htc_ps_wakeup(priv);
30047 @@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
30048 unsigned int len = 0;
30049 int ret = 0;
30050
30051 + pax_track_stack();
30052 +
30053 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30054
30055 ath9k_htc_ps_wakeup(priv);
30056 @@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
30057 char buf[512];
30058 unsigned int len = 0;
30059
30060 + pax_track_stack();
30061 +
30062 len += snprintf(buf + len, sizeof(buf) - len,
30063 "%20s : %10u\n", "Buffers queued",
30064 priv->debug.tx_stats.buf_queued);
30065 @@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
30066 char buf[512];
30067 unsigned int len = 0;
30068
30069 + pax_track_stack();
30070 +
30071 spin_lock_bh(&priv->tx.tx_lock);
30072
30073 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
30074 @@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
30075 char buf[512];
30076 unsigned int len = 0;
30077
30078 + pax_track_stack();
30079 +
30080 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
30081 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
30082
30083 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h
30084 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:44:40.000000000 -0400
30085 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
30086 @@ -585,7 +585,7 @@ struct ath_hw_private_ops {
30087
30088 /* ANI */
30089 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30090 -};
30091 +} __no_const;
30092
30093 /**
30094 * struct ath_hw_ops - callbacks used by hardware code and driver code
30095 @@ -637,7 +637,7 @@ struct ath_hw_ops {
30096 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
30097 struct ath_hw_antcomb_conf *antconf);
30098
30099 -};
30100 +} __no_const;
30101
30102 struct ath_nf_limits {
30103 s16 max;
30104 @@ -650,7 +650,7 @@ struct ath_nf_limits {
30105 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
30106
30107 struct ath_hw {
30108 - struct ath_ops reg_ops;
30109 + ath_ops_no_const reg_ops;
30110
30111 struct ieee80211_hw *hw;
30112 struct ath_common common;
30113 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath.h linux-3.0.4/drivers/net/wireless/ath/ath.h
30114 --- linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
30115 +++ linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
30116 @@ -121,6 +121,7 @@ struct ath_ops {
30117 void (*write_flush) (void *);
30118 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
30119 };
30120 +typedef struct ath_ops __no_const ath_ops_no_const;
30121
30122 struct ath_common;
30123 struct ath_bus_ops;
30124 diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c
30125 --- linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
30126 +++ linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
30127 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30128 int err;
30129 DECLARE_SSID_BUF(ssid);
30130
30131 + pax_track_stack();
30132 +
30133 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30134
30135 if (ssid_len)
30136 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30137 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30138 int err;
30139
30140 + pax_track_stack();
30141 +
30142 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30143 idx, keylen, len);
30144
30145 diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30146 --- linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
30147 +++ linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
30148 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30149 unsigned long flags;
30150 DECLARE_SSID_BUF(ssid);
30151
30152 + pax_track_stack();
30153 +
30154 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30155 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30156 print_ssid(ssid, info_element->data, info_element->len),
30157 diff -urNp linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30158 --- linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-07-21 22:17:23.000000000 -0400
30159 +++ linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-23 21:47:55.000000000 -0400
30160 @@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
30161 */
30162 if (iwl3945_mod_params.disable_hw_scan) {
30163 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30164 - iwl3945_hw_ops.hw_scan = NULL;
30165 + pax_open_kernel();
30166 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30167 + pax_close_kernel();
30168 }
30169
30170 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30171 diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30172 --- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
30173 +++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
30174 @@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
30175 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30176 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30177
30178 + pax_track_stack();
30179 +
30180 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30181
30182 /* Treat uninitialized rate scaling data same as non-existing. */
30183 @@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
30184 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30185 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30186
30187 + pax_track_stack();
30188 +
30189 /* Override starting rate (index 0) if needed for debug purposes */
30190 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30191
30192 diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30193 --- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
30194 +++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
30195 @@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
30196 int pos = 0;
30197 const size_t bufsz = sizeof(buf);
30198
30199 + pax_track_stack();
30200 +
30201 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30202 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30203 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30204 @@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30205 char buf[256 * NUM_IWL_RXON_CTX];
30206 const size_t bufsz = sizeof(buf);
30207
30208 + pax_track_stack();
30209 +
30210 for_each_context(priv, ctx) {
30211 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30212 ctx->ctxid);
30213 diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30214 --- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
30215 +++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
30216 @@ -68,8 +68,8 @@ do {
30217 } while (0)
30218
30219 #else
30220 -#define IWL_DEBUG(__priv, level, fmt, args...)
30221 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30222 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30223 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30224 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30225 const void *p, u32 len)
30226 {}
30227 diff -urNp linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30228 --- linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
30229 +++ linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
30230 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30231 int buf_len = 512;
30232 size_t len = 0;
30233
30234 + pax_track_stack();
30235 +
30236 if (*ppos != 0)
30237 return 0;
30238 if (count < sizeof(buf))
30239 diff -urNp linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c
30240 --- linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
30241 +++ linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
30242 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30243 return -EINVAL;
30244
30245 if (fake_hw_scan) {
30246 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30247 - mac80211_hwsim_ops.sw_scan_start = NULL;
30248 - mac80211_hwsim_ops.sw_scan_complete = NULL;
30249 + pax_open_kernel();
30250 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30251 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30252 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30253 + pax_close_kernel();
30254 }
30255
30256 spin_lock_init(&hwsim_radio_lock);
30257 diff -urNp linux-3.0.4/drivers/net/wireless/rndis_wlan.c linux-3.0.4/drivers/net/wireless/rndis_wlan.c
30258 --- linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
30259 +++ linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
30260 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30261
30262 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30263
30264 - if (rts_threshold < 0 || rts_threshold > 2347)
30265 + if (rts_threshold > 2347)
30266 rts_threshold = 2347;
30267
30268 tmp = cpu_to_le32(rts_threshold);
30269 diff -urNp linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30270 --- linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
30271 +++ linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
30272 @@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
30273 u8 rfpath;
30274 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30275
30276 + pax_track_stack();
30277 +
30278 precommoncmdcnt = 0;
30279 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30280 MAX_PRECMD_CNT,
30281 diff -urNp linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h
30282 --- linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
30283 +++ linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
30284 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
30285 void (*reset)(struct wl1251 *wl);
30286 void (*enable_irq)(struct wl1251 *wl);
30287 void (*disable_irq)(struct wl1251 *wl);
30288 -};
30289 +} __no_const;
30290
30291 struct wl1251 {
30292 struct ieee80211_hw *hw;
30293 diff -urNp linux-3.0.4/drivers/net/wireless/wl12xx/spi.c linux-3.0.4/drivers/net/wireless/wl12xx/spi.c
30294 --- linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
30295 +++ linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
30296 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30297 u32 chunk_len;
30298 int i;
30299
30300 + pax_track_stack();
30301 +
30302 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30303
30304 spi_message_init(&m);
30305 diff -urNp linux-3.0.4/drivers/oprofile/buffer_sync.c linux-3.0.4/drivers/oprofile/buffer_sync.c
30306 --- linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
30307 +++ linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
30308 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30309 if (cookie == NO_COOKIE)
30310 offset = pc;
30311 if (cookie == INVALID_COOKIE) {
30312 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30313 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30314 offset = pc;
30315 }
30316 if (cookie != last_cookie) {
30317 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30318 /* add userspace sample */
30319
30320 if (!mm) {
30321 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
30322 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30323 return 0;
30324 }
30325
30326 cookie = lookup_dcookie(mm, s->eip, &offset);
30327
30328 if (cookie == INVALID_COOKIE) {
30329 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30330 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30331 return 0;
30332 }
30333
30334 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30335 /* ignore backtraces if failed to add a sample */
30336 if (state == sb_bt_start) {
30337 state = sb_bt_ignore;
30338 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30339 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30340 }
30341 }
30342 release_mm(mm);
30343 diff -urNp linux-3.0.4/drivers/oprofile/event_buffer.c linux-3.0.4/drivers/oprofile/event_buffer.c
30344 --- linux-3.0.4/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
30345 +++ linux-3.0.4/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
30346 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30347 }
30348
30349 if (buffer_pos == buffer_size) {
30350 - atomic_inc(&oprofile_stats.event_lost_overflow);
30351 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30352 return;
30353 }
30354
30355 diff -urNp linux-3.0.4/drivers/oprofile/oprof.c linux-3.0.4/drivers/oprofile/oprof.c
30356 --- linux-3.0.4/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
30357 +++ linux-3.0.4/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
30358 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30359 if (oprofile_ops.switch_events())
30360 return;
30361
30362 - atomic_inc(&oprofile_stats.multiplex_counter);
30363 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30364 start_switch_worker();
30365 }
30366
30367 diff -urNp linux-3.0.4/drivers/oprofile/oprofilefs.c linux-3.0.4/drivers/oprofile/oprofilefs.c
30368 --- linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
30369 +++ linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
30370 @@ -186,7 +186,7 @@ static const struct file_operations atom
30371
30372
30373 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30374 - char const *name, atomic_t *val)
30375 + char const *name, atomic_unchecked_t *val)
30376 {
30377 return __oprofilefs_create_file(sb, root, name,
30378 &atomic_ro_fops, 0444, val);
30379 diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.c linux-3.0.4/drivers/oprofile/oprofile_stats.c
30380 --- linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
30381 +++ linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
30382 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30383 cpu_buf->sample_invalid_eip = 0;
30384 }
30385
30386 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30387 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30388 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
30389 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30390 - atomic_set(&oprofile_stats.multiplex_counter, 0);
30391 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30392 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30393 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30394 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30395 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30396 }
30397
30398
30399 diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.h linux-3.0.4/drivers/oprofile/oprofile_stats.h
30400 --- linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
30401 +++ linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
30402 @@ -13,11 +13,11 @@
30403 #include <asm/atomic.h>
30404
30405 struct oprofile_stat_struct {
30406 - atomic_t sample_lost_no_mm;
30407 - atomic_t sample_lost_no_mapping;
30408 - atomic_t bt_lost_no_mapping;
30409 - atomic_t event_lost_overflow;
30410 - atomic_t multiplex_counter;
30411 + atomic_unchecked_t sample_lost_no_mm;
30412 + atomic_unchecked_t sample_lost_no_mapping;
30413 + atomic_unchecked_t bt_lost_no_mapping;
30414 + atomic_unchecked_t event_lost_overflow;
30415 + atomic_unchecked_t multiplex_counter;
30416 };
30417
30418 extern struct oprofile_stat_struct oprofile_stats;
30419 diff -urNp linux-3.0.4/drivers/parport/procfs.c linux-3.0.4/drivers/parport/procfs.c
30420 --- linux-3.0.4/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
30421 +++ linux-3.0.4/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
30422 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30423
30424 *ppos += len;
30425
30426 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30427 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30428 }
30429
30430 #ifdef CONFIG_PARPORT_1284
30431 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30432
30433 *ppos += len;
30434
30435 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30436 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30437 }
30438 #endif /* IEEE1284.3 support. */
30439
30440 diff -urNp linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h
30441 --- linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
30442 +++ linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
30443 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30444 int (*hardware_test) (struct slot* slot, u32 value);
30445 u8 (*get_power) (struct slot* slot);
30446 int (*set_power) (struct slot* slot, int value);
30447 -};
30448 +} __no_const;
30449
30450 struct cpci_hp_controller {
30451 unsigned int irq;
30452 diff -urNp linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c
30453 --- linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
30454 +++ linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
30455 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30456
30457 void compaq_nvram_init (void __iomem *rom_start)
30458 {
30459 +
30460 +#ifndef CONFIG_PAX_KERNEXEC
30461 if (rom_start) {
30462 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30463 }
30464 +#endif
30465 +
30466 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30467
30468 /* initialize our int15 lock */
30469 diff -urNp linux-3.0.4/drivers/pci/pcie/aspm.c linux-3.0.4/drivers/pci/pcie/aspm.c
30470 --- linux-3.0.4/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
30471 +++ linux-3.0.4/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
30472 @@ -27,9 +27,9 @@
30473 #define MODULE_PARAM_PREFIX "pcie_aspm."
30474
30475 /* Note: those are not register definitions */
30476 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30477 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30478 -#define ASPM_STATE_L1 (4) /* L1 state */
30479 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30480 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30481 +#define ASPM_STATE_L1 (4U) /* L1 state */
30482 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30483 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30484
30485 diff -urNp linux-3.0.4/drivers/pci/probe.c linux-3.0.4/drivers/pci/probe.c
30486 --- linux-3.0.4/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
30487 +++ linux-3.0.4/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
30488 @@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
30489 u32 l, sz, mask;
30490 u16 orig_cmd;
30491
30492 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30493 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30494
30495 if (!dev->mmio_always_on) {
30496 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30497 diff -urNp linux-3.0.4/drivers/pci/proc.c linux-3.0.4/drivers/pci/proc.c
30498 --- linux-3.0.4/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
30499 +++ linux-3.0.4/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
30500 @@ -476,7 +476,16 @@ static const struct file_operations proc
30501 static int __init pci_proc_init(void)
30502 {
30503 struct pci_dev *dev = NULL;
30504 +
30505 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
30506 +#ifdef CONFIG_GRKERNSEC_PROC_USER
30507 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30508 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30509 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30510 +#endif
30511 +#else
30512 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30513 +#endif
30514 proc_create("devices", 0, proc_bus_pci_dir,
30515 &proc_bus_pci_dev_operations);
30516 proc_initialized = 1;
30517 diff -urNp linux-3.0.4/drivers/pci/xen-pcifront.c linux-3.0.4/drivers/pci/xen-pcifront.c
30518 --- linux-3.0.4/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
30519 +++ linux-3.0.4/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
30520 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30521 struct pcifront_sd *sd = bus->sysdata;
30522 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30523
30524 + pax_track_stack();
30525 +
30526 if (verbose_request)
30527 dev_info(&pdev->xdev->dev,
30528 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30529 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30530 struct pcifront_sd *sd = bus->sysdata;
30531 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30532
30533 + pax_track_stack();
30534 +
30535 if (verbose_request)
30536 dev_info(&pdev->xdev->dev,
30537 "write dev=%04x:%02x:%02x.%01x - "
30538 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30539 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30540 struct msi_desc *entry;
30541
30542 + pax_track_stack();
30543 +
30544 if (nvec > SH_INFO_MAX_VEC) {
30545 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30546 " Increase SH_INFO_MAX_VEC.\n", nvec);
30547 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30548 struct pcifront_sd *sd = dev->bus->sysdata;
30549 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30550
30551 + pax_track_stack();
30552 +
30553 err = do_pci_op(pdev, &op);
30554
30555 /* What should do for error ? */
30556 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30557 struct pcifront_sd *sd = dev->bus->sysdata;
30558 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30559
30560 + pax_track_stack();
30561 +
30562 err = do_pci_op(pdev, &op);
30563 if (likely(!err)) {
30564 vector[0] = op.value;
30565 diff -urNp linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c
30566 --- linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
30567 +++ linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
30568 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
30569 return 0;
30570 }
30571
30572 -void static hotkey_mask_warn_incomplete_mask(void)
30573 +static void hotkey_mask_warn_incomplete_mask(void)
30574 {
30575 /* log only what the user can fix... */
30576 const u32 wantedmask = hotkey_driver_mask &
30577 diff -urNp linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c
30578 --- linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
30579 +++ linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
30580 @@ -59,7 +59,7 @@ do { \
30581 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30582 } while(0)
30583
30584 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30585 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30586 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30587
30588 /*
30589 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30590
30591 cpu = get_cpu();
30592 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30593 +
30594 + pax_open_kernel();
30595 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30596 + pax_close_kernel();
30597
30598 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30599 spin_lock_irqsave(&pnp_bios_lock, flags);
30600 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30601 :"memory");
30602 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30603
30604 + pax_open_kernel();
30605 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30606 + pax_close_kernel();
30607 +
30608 put_cpu();
30609
30610 /* If we get here and this is set then the PnP BIOS faulted on us. */
30611 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30612 return status;
30613 }
30614
30615 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
30616 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30617 {
30618 int i;
30619
30620 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30621 pnp_bios_callpoint.offset = header->fields.pm16offset;
30622 pnp_bios_callpoint.segment = PNP_CS16;
30623
30624 + pax_open_kernel();
30625 +
30626 for_each_possible_cpu(i) {
30627 struct desc_struct *gdt = get_cpu_gdt_table(i);
30628 if (!gdt)
30629 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30630 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30631 (unsigned long)__va(header->fields.pm16dseg));
30632 }
30633 +
30634 + pax_close_kernel();
30635 }
30636 diff -urNp linux-3.0.4/drivers/pnp/resource.c linux-3.0.4/drivers/pnp/resource.c
30637 --- linux-3.0.4/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
30638 +++ linux-3.0.4/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
30639 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30640 return 1;
30641
30642 /* check if the resource is valid */
30643 - if (*irq < 0 || *irq > 15)
30644 + if (*irq > 15)
30645 return 0;
30646
30647 /* check if the resource is reserved */
30648 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30649 return 1;
30650
30651 /* check if the resource is valid */
30652 - if (*dma < 0 || *dma == 4 || *dma > 7)
30653 + if (*dma == 4 || *dma > 7)
30654 return 0;
30655
30656 /* check if the resource is reserved */
30657 diff -urNp linux-3.0.4/drivers/power/bq27x00_battery.c linux-3.0.4/drivers/power/bq27x00_battery.c
30658 --- linux-3.0.4/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
30659 +++ linux-3.0.4/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
30660 @@ -67,7 +67,7 @@
30661 struct bq27x00_device_info;
30662 struct bq27x00_access_methods {
30663 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30664 -};
30665 +} __no_const;
30666
30667 enum bq27x00_chip { BQ27000, BQ27500 };
30668
30669 diff -urNp linux-3.0.4/drivers/regulator/max8660.c linux-3.0.4/drivers/regulator/max8660.c
30670 --- linux-3.0.4/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
30671 +++ linux-3.0.4/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
30672 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30673 max8660->shadow_regs[MAX8660_OVER1] = 5;
30674 } else {
30675 /* Otherwise devices can be toggled via software */
30676 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
30677 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
30678 + pax_open_kernel();
30679 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30680 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30681 + pax_close_kernel();
30682 }
30683
30684 /*
30685 diff -urNp linux-3.0.4/drivers/regulator/mc13892-regulator.c linux-3.0.4/drivers/regulator/mc13892-regulator.c
30686 --- linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
30687 +++ linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
30688 @@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
30689 }
30690 mc13xxx_unlock(mc13892);
30691
30692 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30693 + pax_open_kernel();
30694 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30695 = mc13892_vcam_set_mode;
30696 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30697 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30698 = mc13892_vcam_get_mode;
30699 + pax_close_kernel();
30700 for (i = 0; i < pdata->num_regulators; i++) {
30701 init_data = &pdata->regulators[i];
30702 priv->regulators[i] = regulator_register(
30703 diff -urNp linux-3.0.4/drivers/rtc/rtc-dev.c linux-3.0.4/drivers/rtc/rtc-dev.c
30704 --- linux-3.0.4/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
30705 +++ linux-3.0.4/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
30706 @@ -14,6 +14,7 @@
30707 #include <linux/module.h>
30708 #include <linux/rtc.h>
30709 #include <linux/sched.h>
30710 +#include <linux/grsecurity.h>
30711 #include "rtc-core.h"
30712
30713 static dev_t rtc_devt;
30714 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30715 if (copy_from_user(&tm, uarg, sizeof(tm)))
30716 return -EFAULT;
30717
30718 + gr_log_timechange();
30719 +
30720 return rtc_set_time(rtc, &tm);
30721
30722 case RTC_PIE_ON:
30723 diff -urNp linux-3.0.4/drivers/scsi/aacraid/aacraid.h linux-3.0.4/drivers/scsi/aacraid/aacraid.h
30724 --- linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
30725 +++ linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
30726 @@ -492,7 +492,7 @@ struct adapter_ops
30727 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30728 /* Administrative operations */
30729 int (*adapter_comm)(struct aac_dev * dev, int comm);
30730 -};
30731 +} __no_const;
30732
30733 /*
30734 * Define which interrupt handler needs to be installed
30735 diff -urNp linux-3.0.4/drivers/scsi/aacraid/commctrl.c linux-3.0.4/drivers/scsi/aacraid/commctrl.c
30736 --- linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
30737 +++ linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
30738 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30739 u32 actual_fibsize64, actual_fibsize = 0;
30740 int i;
30741
30742 + pax_track_stack();
30743
30744 if (dev->in_reset) {
30745 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30746 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfad.c linux-3.0.4/drivers/scsi/bfa/bfad.c
30747 --- linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
30748 +++ linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
30749 @@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30750 struct bfad_vport_s *vport, *vport_new;
30751 struct bfa_fcs_driver_info_s driver_info;
30752
30753 + pax_track_stack();
30754 +
30755 /* Fill the driver_info info to fcs*/
30756 memset(&driver_info, 0, sizeof(driver_info));
30757 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30758 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c
30759 --- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
30760 +++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
30761 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30762 u16 len, count;
30763 u16 templen;
30764
30765 + pax_track_stack();
30766 +
30767 /*
30768 * get hba attributes
30769 */
30770 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30771 u8 count = 0;
30772 u16 templen;
30773
30774 + pax_track_stack();
30775 +
30776 /*
30777 * get port attributes
30778 */
30779 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c
30780 --- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
30781 +++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
30782 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30783 struct fc_rpsc_speed_info_s speeds;
30784 struct bfa_port_attr_s pport_attr;
30785
30786 + pax_track_stack();
30787 +
30788 bfa_trc(port->fcs, rx_fchs->s_id);
30789 bfa_trc(port->fcs, rx_fchs->d_id);
30790
30791 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa.h linux-3.0.4/drivers/scsi/bfa/bfa.h
30792 --- linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
30793 +++ linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
30794 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
30795 u32 *nvecs, u32 *maxvec);
30796 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30797 u32 *end);
30798 -};
30799 +} __no_const;
30800 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30801
30802 struct bfa_iocfc_s {
30803 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h
30804 --- linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
30805 +++ linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
30806 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30807 bfa_ioc_disable_cbfn_t disable_cbfn;
30808 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30809 bfa_ioc_reset_cbfn_t reset_cbfn;
30810 -};
30811 +} __no_const;
30812
30813 /*
30814 * Heartbeat failure notification queue element.
30815 @@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
30816 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30817 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30818 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30819 -};
30820 +} __no_const;
30821
30822 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30823 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30824 diff -urNp linux-3.0.4/drivers/scsi/BusLogic.c linux-3.0.4/drivers/scsi/BusLogic.c
30825 --- linux-3.0.4/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
30826 +++ linux-3.0.4/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
30827 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30828 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30829 *PrototypeHostAdapter)
30830 {
30831 + pax_track_stack();
30832 +
30833 /*
30834 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30835 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30836 diff -urNp linux-3.0.4/drivers/scsi/dpt_i2o.c linux-3.0.4/drivers/scsi/dpt_i2o.c
30837 --- linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
30838 +++ linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
30839 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30840 dma_addr_t addr;
30841 ulong flags = 0;
30842
30843 + pax_track_stack();
30844 +
30845 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30846 // get user msg size in u32s
30847 if(get_user(size, &user_msg[0])){
30848 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
30849 s32 rcode;
30850 dma_addr_t addr;
30851
30852 + pax_track_stack();
30853 +
30854 memset(msg, 0 , sizeof(msg));
30855 len = scsi_bufflen(cmd);
30856 direction = 0x00000000;
30857 diff -urNp linux-3.0.4/drivers/scsi/eata.c linux-3.0.4/drivers/scsi/eata.c
30858 --- linux-3.0.4/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
30859 +++ linux-3.0.4/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
30860 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
30861 struct hostdata *ha;
30862 char name[16];
30863
30864 + pax_track_stack();
30865 +
30866 sprintf(name, "%s%d", driver_name, j);
30867
30868 if (!request_region(port_base, REGION_SIZE, driver_name)) {
30869 diff -urNp linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c
30870 --- linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
30871 +++ linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
30872 @@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
30873 } buf;
30874 int rc;
30875
30876 + pax_track_stack();
30877 +
30878 fiph = (struct fip_header *)skb->data;
30879 sub = fiph->fip_subcode;
30880
30881 diff -urNp linux-3.0.4/drivers/scsi/gdth.c linux-3.0.4/drivers/scsi/gdth.c
30882 --- linux-3.0.4/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
30883 +++ linux-3.0.4/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
30884 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
30885 unsigned long flags;
30886 gdth_ha_str *ha;
30887
30888 + pax_track_stack();
30889 +
30890 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
30891 return -EFAULT;
30892 ha = gdth_find_ha(ldrv.ionode);
30893 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
30894 gdth_ha_str *ha;
30895 int rval;
30896
30897 + pax_track_stack();
30898 +
30899 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
30900 res.number >= MAX_HDRIVES)
30901 return -EFAULT;
30902 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
30903 gdth_ha_str *ha;
30904 int rval;
30905
30906 + pax_track_stack();
30907 +
30908 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
30909 return -EFAULT;
30910 ha = gdth_find_ha(gen.ionode);
30911 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
30912 int i;
30913 gdth_cmd_str gdtcmd;
30914 char cmnd[MAX_COMMAND_SIZE];
30915 +
30916 + pax_track_stack();
30917 +
30918 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
30919
30920 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
30921 diff -urNp linux-3.0.4/drivers/scsi/gdth_proc.c linux-3.0.4/drivers/scsi/gdth_proc.c
30922 --- linux-3.0.4/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
30923 +++ linux-3.0.4/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
30924 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
30925 u64 paddr;
30926
30927 char cmnd[MAX_COMMAND_SIZE];
30928 +
30929 + pax_track_stack();
30930 +
30931 memset(cmnd, 0xff, 12);
30932 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
30933
30934 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
30935 gdth_hget_str *phg;
30936 char cmnd[MAX_COMMAND_SIZE];
30937
30938 + pax_track_stack();
30939 +
30940 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
30941 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
30942 if (!gdtcmd || !estr)
30943 diff -urNp linux-3.0.4/drivers/scsi/hosts.c linux-3.0.4/drivers/scsi/hosts.c
30944 --- linux-3.0.4/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
30945 +++ linux-3.0.4/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
30946 @@ -42,7 +42,7 @@
30947 #include "scsi_logging.h"
30948
30949
30950 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
30951 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
30952
30953
30954 static void scsi_host_cls_release(struct device *dev)
30955 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
30956 * subtract one because we increment first then return, but we need to
30957 * know what the next host number was before increment
30958 */
30959 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
30960 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
30961 shost->dma_channel = 0xff;
30962
30963 /* These three are default values which can be overridden */
30964 diff -urNp linux-3.0.4/drivers/scsi/hpsa.c linux-3.0.4/drivers/scsi/hpsa.c
30965 --- linux-3.0.4/drivers/scsi/hpsa.c 2011-07-21 22:17:23.000000000 -0400
30966 +++ linux-3.0.4/drivers/scsi/hpsa.c 2011-08-23 21:47:55.000000000 -0400
30967 @@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
30968 u32 a;
30969
30970 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
30971 - return h->access.command_completed(h);
30972 + return h->access->command_completed(h);
30973
30974 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
30975 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
30976 @@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
30977 while (!list_empty(&h->reqQ)) {
30978 c = list_entry(h->reqQ.next, struct CommandList, list);
30979 /* can't do anything if fifo is full */
30980 - if ((h->access.fifo_full(h))) {
30981 + if ((h->access->fifo_full(h))) {
30982 dev_warn(&h->pdev->dev, "fifo full\n");
30983 break;
30984 }
30985 @@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
30986 h->Qdepth--;
30987
30988 /* Tell the controller execute command */
30989 - h->access.submit_command(h, c);
30990 + h->access->submit_command(h, c);
30991
30992 /* Put job onto the completed Q */
30993 addQ(&h->cmpQ, c);
30994 @@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
30995
30996 static inline unsigned long get_next_completion(struct ctlr_info *h)
30997 {
30998 - return h->access.command_completed(h);
30999 + return h->access->command_completed(h);
31000 }
31001
31002 static inline bool interrupt_pending(struct ctlr_info *h)
31003 {
31004 - return h->access.intr_pending(h);
31005 + return h->access->intr_pending(h);
31006 }
31007
31008 static inline long interrupt_not_for_us(struct ctlr_info *h)
31009 {
31010 - return (h->access.intr_pending(h) == 0) ||
31011 + return (h->access->intr_pending(h) == 0) ||
31012 (h->interrupts_enabled == 0);
31013 }
31014
31015 @@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
31016 if (prod_index < 0)
31017 return -ENODEV;
31018 h->product_name = products[prod_index].product_name;
31019 - h->access = *(products[prod_index].access);
31020 + h->access = products[prod_index].access;
31021
31022 if (hpsa_board_disabled(h->pdev)) {
31023 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31024 @@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
31025 }
31026
31027 /* make sure the board interrupts are off */
31028 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31029 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31030
31031 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
31032 goto clean2;
31033 @@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
31034 * fake ones to scoop up any residual completions.
31035 */
31036 spin_lock_irqsave(&h->lock, flags);
31037 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31038 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31039 spin_unlock_irqrestore(&h->lock, flags);
31040 free_irq(h->intr[h->intr_mode], h);
31041 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
31042 @@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
31043 dev_info(&h->pdev->dev, "Board READY.\n");
31044 dev_info(&h->pdev->dev,
31045 "Waiting for stale completions to drain.\n");
31046 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31047 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31048 msleep(10000);
31049 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31050 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31051
31052 rc = controller_reset_failed(h->cfgtable);
31053 if (rc)
31054 @@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
31055 }
31056
31057 /* Turn the interrupts on so we can service requests */
31058 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31059 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31060
31061 hpsa_hba_inquiry(h);
31062 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
31063 @@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
31064 * To write all data in the battery backed cache to disks
31065 */
31066 hpsa_flush_cache(h);
31067 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31068 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31069 free_irq(h->intr[h->intr_mode], h);
31070 #ifdef CONFIG_PCI_MSI
31071 if (h->msix_vector)
31072 @@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
31073 return;
31074 }
31075 /* Change the access methods to the performant access methods */
31076 - h->access = SA5_performant_access;
31077 + h->access = &SA5_performant_access;
31078 h->transMethod = CFGTBL_Trans_Performant;
31079 }
31080
31081 diff -urNp linux-3.0.4/drivers/scsi/hpsa.h linux-3.0.4/drivers/scsi/hpsa.h
31082 --- linux-3.0.4/drivers/scsi/hpsa.h 2011-08-23 21:44:40.000000000 -0400
31083 +++ linux-3.0.4/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
31084 @@ -73,7 +73,7 @@ struct ctlr_info {
31085 unsigned int msix_vector;
31086 unsigned int msi_vector;
31087 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31088 - struct access_method access;
31089 + struct access_method *access;
31090
31091 /* queue and queue Info */
31092 struct list_head reqQ;
31093 diff -urNp linux-3.0.4/drivers/scsi/ips.h linux-3.0.4/drivers/scsi/ips.h
31094 --- linux-3.0.4/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
31095 +++ linux-3.0.4/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
31096 @@ -1027,7 +1027,7 @@ typedef struct {
31097 int (*intr)(struct ips_ha *);
31098 void (*enableint)(struct ips_ha *);
31099 uint32_t (*statupd)(struct ips_ha *);
31100 -} ips_hw_func_t;
31101 +} __no_const ips_hw_func_t;
31102
31103 typedef struct ips_ha {
31104 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31105 diff -urNp linux-3.0.4/drivers/scsi/libfc/fc_exch.c linux-3.0.4/drivers/scsi/libfc/fc_exch.c
31106 --- linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
31107 +++ linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
31108 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
31109 * all together if not used XXX
31110 */
31111 struct {
31112 - atomic_t no_free_exch;
31113 - atomic_t no_free_exch_xid;
31114 - atomic_t xid_not_found;
31115 - atomic_t xid_busy;
31116 - atomic_t seq_not_found;
31117 - atomic_t non_bls_resp;
31118 + atomic_unchecked_t no_free_exch;
31119 + atomic_unchecked_t no_free_exch_xid;
31120 + atomic_unchecked_t xid_not_found;
31121 + atomic_unchecked_t xid_busy;
31122 + atomic_unchecked_t seq_not_found;
31123 + atomic_unchecked_t non_bls_resp;
31124 } stats;
31125 };
31126
31127 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31128 /* allocate memory for exchange */
31129 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31130 if (!ep) {
31131 - atomic_inc(&mp->stats.no_free_exch);
31132 + atomic_inc_unchecked(&mp->stats.no_free_exch);
31133 goto out;
31134 }
31135 memset(ep, 0, sizeof(*ep));
31136 @@ -761,7 +761,7 @@ out:
31137 return ep;
31138 err:
31139 spin_unlock_bh(&pool->lock);
31140 - atomic_inc(&mp->stats.no_free_exch_xid);
31141 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31142 mempool_free(ep, mp->ep_pool);
31143 return NULL;
31144 }
31145 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31146 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31147 ep = fc_exch_find(mp, xid);
31148 if (!ep) {
31149 - atomic_inc(&mp->stats.xid_not_found);
31150 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31151 reject = FC_RJT_OX_ID;
31152 goto out;
31153 }
31154 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31155 ep = fc_exch_find(mp, xid);
31156 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31157 if (ep) {
31158 - atomic_inc(&mp->stats.xid_busy);
31159 + atomic_inc_unchecked(&mp->stats.xid_busy);
31160 reject = FC_RJT_RX_ID;
31161 goto rel;
31162 }
31163 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31164 }
31165 xid = ep->xid; /* get our XID */
31166 } else if (!ep) {
31167 - atomic_inc(&mp->stats.xid_not_found);
31168 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31169 reject = FC_RJT_RX_ID; /* XID not found */
31170 goto out;
31171 }
31172 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31173 } else {
31174 sp = &ep->seq;
31175 if (sp->id != fh->fh_seq_id) {
31176 - atomic_inc(&mp->stats.seq_not_found);
31177 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31178 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31179 goto rel;
31180 }
31181 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31182
31183 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31184 if (!ep) {
31185 - atomic_inc(&mp->stats.xid_not_found);
31186 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31187 goto out;
31188 }
31189 if (ep->esb_stat & ESB_ST_COMPLETE) {
31190 - atomic_inc(&mp->stats.xid_not_found);
31191 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31192 goto rel;
31193 }
31194 if (ep->rxid == FC_XID_UNKNOWN)
31195 ep->rxid = ntohs(fh->fh_rx_id);
31196 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31197 - atomic_inc(&mp->stats.xid_not_found);
31198 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31199 goto rel;
31200 }
31201 if (ep->did != ntoh24(fh->fh_s_id) &&
31202 ep->did != FC_FID_FLOGI) {
31203 - atomic_inc(&mp->stats.xid_not_found);
31204 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31205 goto rel;
31206 }
31207 sof = fr_sof(fp);
31208 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31209 sp->ssb_stat |= SSB_ST_RESP;
31210 sp->id = fh->fh_seq_id;
31211 } else if (sp->id != fh->fh_seq_id) {
31212 - atomic_inc(&mp->stats.seq_not_found);
31213 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31214 goto rel;
31215 }
31216
31217 @@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
31218 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31219
31220 if (!sp)
31221 - atomic_inc(&mp->stats.xid_not_found);
31222 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31223 else
31224 - atomic_inc(&mp->stats.non_bls_resp);
31225 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
31226
31227 fc_frame_free(fp);
31228 }
31229 diff -urNp linux-3.0.4/drivers/scsi/libsas/sas_ata.c linux-3.0.4/drivers/scsi/libsas/sas_ata.c
31230 --- linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
31231 +++ linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
31232 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
31233 .postreset = ata_std_postreset,
31234 .error_handler = ata_std_error_handler,
31235 .post_internal_cmd = sas_ata_post_internal,
31236 - .qc_defer = ata_std_qc_defer,
31237 + .qc_defer = ata_std_qc_defer,
31238 .qc_prep = ata_noop_qc_prep,
31239 .qc_issue = sas_ata_qc_issue,
31240 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31241 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c
31242 --- linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
31243 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
31244 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31245
31246 #include <linux/debugfs.h>
31247
31248 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31249 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31250 static unsigned long lpfc_debugfs_start_time = 0L;
31251
31252 /* iDiag */
31253 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31254 lpfc_debugfs_enable = 0;
31255
31256 len = 0;
31257 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31258 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31259 (lpfc_debugfs_max_disc_trc - 1);
31260 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31261 dtp = vport->disc_trc + i;
31262 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31263 lpfc_debugfs_enable = 0;
31264
31265 len = 0;
31266 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31267 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31268 (lpfc_debugfs_max_slow_ring_trc - 1);
31269 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31270 dtp = phba->slow_ring_trc + i;
31271 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31272 uint32_t *ptr;
31273 char buffer[1024];
31274
31275 + pax_track_stack();
31276 +
31277 off = 0;
31278 spin_lock_irq(&phba->hbalock);
31279
31280 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31281 !vport || !vport->disc_trc)
31282 return;
31283
31284 - index = atomic_inc_return(&vport->disc_trc_cnt) &
31285 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31286 (lpfc_debugfs_max_disc_trc - 1);
31287 dtp = vport->disc_trc + index;
31288 dtp->fmt = fmt;
31289 dtp->data1 = data1;
31290 dtp->data2 = data2;
31291 dtp->data3 = data3;
31292 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31293 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31294 dtp->jif = jiffies;
31295 #endif
31296 return;
31297 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31298 !phba || !phba->slow_ring_trc)
31299 return;
31300
31301 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31302 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31303 (lpfc_debugfs_max_slow_ring_trc - 1);
31304 dtp = phba->slow_ring_trc + index;
31305 dtp->fmt = fmt;
31306 dtp->data1 = data1;
31307 dtp->data2 = data2;
31308 dtp->data3 = data3;
31309 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31310 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31311 dtp->jif = jiffies;
31312 #endif
31313 return;
31314 @@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31315 "slow_ring buffer\n");
31316 goto debug_failed;
31317 }
31318 - atomic_set(&phba->slow_ring_trc_cnt, 0);
31319 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31320 memset(phba->slow_ring_trc, 0,
31321 (sizeof(struct lpfc_debugfs_trc) *
31322 lpfc_debugfs_max_slow_ring_trc));
31323 @@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31324 "buffer\n");
31325 goto debug_failed;
31326 }
31327 - atomic_set(&vport->disc_trc_cnt, 0);
31328 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31329
31330 snprintf(name, sizeof(name), "discovery_trace");
31331 vport->debug_disc_trc =
31332 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc.h linux-3.0.4/drivers/scsi/lpfc/lpfc.h
31333 --- linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-07-21 22:17:23.000000000 -0400
31334 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-08-23 21:47:55.000000000 -0400
31335 @@ -420,7 +420,7 @@ struct lpfc_vport {
31336 struct dentry *debug_nodelist;
31337 struct dentry *vport_debugfs_root;
31338 struct lpfc_debugfs_trc *disc_trc;
31339 - atomic_t disc_trc_cnt;
31340 + atomic_unchecked_t disc_trc_cnt;
31341 #endif
31342 uint8_t stat_data_enabled;
31343 uint8_t stat_data_blocked;
31344 @@ -826,8 +826,8 @@ struct lpfc_hba {
31345 struct timer_list fabric_block_timer;
31346 unsigned long bit_flags;
31347 #define FABRIC_COMANDS_BLOCKED 0
31348 - atomic_t num_rsrc_err;
31349 - atomic_t num_cmd_success;
31350 + atomic_unchecked_t num_rsrc_err;
31351 + atomic_unchecked_t num_cmd_success;
31352 unsigned long last_rsrc_error_time;
31353 unsigned long last_ramp_down_time;
31354 unsigned long last_ramp_up_time;
31355 @@ -841,7 +841,7 @@ struct lpfc_hba {
31356 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31357 struct dentry *debug_slow_ring_trc;
31358 struct lpfc_debugfs_trc *slow_ring_trc;
31359 - atomic_t slow_ring_trc_cnt;
31360 + atomic_unchecked_t slow_ring_trc_cnt;
31361 /* iDiag debugfs sub-directory */
31362 struct dentry *idiag_root;
31363 struct dentry *idiag_pci_cfg;
31364 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c
31365 --- linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-07-21 22:17:23.000000000 -0400
31366 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-23 21:47:56.000000000 -0400
31367 @@ -9923,8 +9923,10 @@ lpfc_init(void)
31368 printk(LPFC_COPYRIGHT "\n");
31369
31370 if (lpfc_enable_npiv) {
31371 - lpfc_transport_functions.vport_create = lpfc_vport_create;
31372 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31373 + pax_open_kernel();
31374 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31375 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31376 + pax_close_kernel();
31377 }
31378 lpfc_transport_template =
31379 fc_attach_transport(&lpfc_transport_functions);
31380 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c
31381 --- linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-07-21 22:17:23.000000000 -0400
31382 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-23 21:47:56.000000000 -0400
31383 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31384 uint32_t evt_posted;
31385
31386 spin_lock_irqsave(&phba->hbalock, flags);
31387 - atomic_inc(&phba->num_rsrc_err);
31388 + atomic_inc_unchecked(&phba->num_rsrc_err);
31389 phba->last_rsrc_error_time = jiffies;
31390
31391 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31392 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31393 unsigned long flags;
31394 struct lpfc_hba *phba = vport->phba;
31395 uint32_t evt_posted;
31396 - atomic_inc(&phba->num_cmd_success);
31397 + atomic_inc_unchecked(&phba->num_cmd_success);
31398
31399 if (vport->cfg_lun_queue_depth <= queue_depth)
31400 return;
31401 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31402 unsigned long num_rsrc_err, num_cmd_success;
31403 int i;
31404
31405 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31406 - num_cmd_success = atomic_read(&phba->num_cmd_success);
31407 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31408 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31409
31410 vports = lpfc_create_vport_work_array(phba);
31411 if (vports != NULL)
31412 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31413 }
31414 }
31415 lpfc_destroy_vport_work_array(phba, vports);
31416 - atomic_set(&phba->num_rsrc_err, 0);
31417 - atomic_set(&phba->num_cmd_success, 0);
31418 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31419 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31420 }
31421
31422 /**
31423 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31424 }
31425 }
31426 lpfc_destroy_vport_work_array(phba, vports);
31427 - atomic_set(&phba->num_rsrc_err, 0);
31428 - atomic_set(&phba->num_cmd_success, 0);
31429 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31430 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31431 }
31432
31433 /**
31434 diff -urNp linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c
31435 --- linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
31436 +++ linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
31437 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
31438 int rval;
31439 int i;
31440
31441 + pax_track_stack();
31442 +
31443 // Allocate memory for the base list of scb for management module.
31444 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31445
31446 diff -urNp linux-3.0.4/drivers/scsi/osd/osd_initiator.c linux-3.0.4/drivers/scsi/osd/osd_initiator.c
31447 --- linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
31448 +++ linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
31449 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31450 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31451 int ret;
31452
31453 + pax_track_stack();
31454 +
31455 or = osd_start_request(od, GFP_KERNEL);
31456 if (!or)
31457 return -ENOMEM;
31458 diff -urNp linux-3.0.4/drivers/scsi/pmcraid.c linux-3.0.4/drivers/scsi/pmcraid.c
31459 --- linux-3.0.4/drivers/scsi/pmcraid.c 2011-08-23 21:44:40.000000000 -0400
31460 +++ linux-3.0.4/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
31461 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31462 res->scsi_dev = scsi_dev;
31463 scsi_dev->hostdata = res;
31464 res->change_detected = 0;
31465 - atomic_set(&res->read_failures, 0);
31466 - atomic_set(&res->write_failures, 0);
31467 + atomic_set_unchecked(&res->read_failures, 0);
31468 + atomic_set_unchecked(&res->write_failures, 0);
31469 rc = 0;
31470 }
31471 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31472 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31473
31474 /* If this was a SCSI read/write command keep count of errors */
31475 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31476 - atomic_inc(&res->read_failures);
31477 + atomic_inc_unchecked(&res->read_failures);
31478 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31479 - atomic_inc(&res->write_failures);
31480 + atomic_inc_unchecked(&res->write_failures);
31481
31482 if (!RES_IS_GSCSI(res->cfg_entry) &&
31483 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31484 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31485 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31486 * hrrq_id assigned here in queuecommand
31487 */
31488 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31489 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31490 pinstance->num_hrrq;
31491 cmd->cmd_done = pmcraid_io_done;
31492
31493 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31494 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31495 * hrrq_id assigned here in queuecommand
31496 */
31497 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31498 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31499 pinstance->num_hrrq;
31500
31501 if (request_size) {
31502 @@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
31503
31504 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31505 /* add resources only after host is added into system */
31506 - if (!atomic_read(&pinstance->expose_resources))
31507 + if (!atomic_read_unchecked(&pinstance->expose_resources))
31508 return;
31509
31510 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31511 @@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
31512 init_waitqueue_head(&pinstance->reset_wait_q);
31513
31514 atomic_set(&pinstance->outstanding_cmds, 0);
31515 - atomic_set(&pinstance->last_message_id, 0);
31516 - atomic_set(&pinstance->expose_resources, 0);
31517 + atomic_set_unchecked(&pinstance->last_message_id, 0);
31518 + atomic_set_unchecked(&pinstance->expose_resources, 0);
31519
31520 INIT_LIST_HEAD(&pinstance->free_res_q);
31521 INIT_LIST_HEAD(&pinstance->used_res_q);
31522 @@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
31523 /* Schedule worker thread to handle CCN and take care of adding and
31524 * removing devices to OS
31525 */
31526 - atomic_set(&pinstance->expose_resources, 1);
31527 + atomic_set_unchecked(&pinstance->expose_resources, 1);
31528 schedule_work(&pinstance->worker_q);
31529 return rc;
31530
31531 diff -urNp linux-3.0.4/drivers/scsi/pmcraid.h linux-3.0.4/drivers/scsi/pmcraid.h
31532 --- linux-3.0.4/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
31533 +++ linux-3.0.4/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
31534 @@ -749,7 +749,7 @@ struct pmcraid_instance {
31535 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31536
31537 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31538 - atomic_t last_message_id;
31539 + atomic_unchecked_t last_message_id;
31540
31541 /* configuration table */
31542 struct pmcraid_config_table *cfg_table;
31543 @@ -778,7 +778,7 @@ struct pmcraid_instance {
31544 atomic_t outstanding_cmds;
31545
31546 /* should add/delete resources to mid-layer now ?*/
31547 - atomic_t expose_resources;
31548 + atomic_unchecked_t expose_resources;
31549
31550
31551
31552 @@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
31553 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31554 };
31555 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31556 - atomic_t read_failures; /* count of failed READ commands */
31557 - atomic_t write_failures; /* count of failed WRITE commands */
31558 + atomic_unchecked_t read_failures; /* count of failed READ commands */
31559 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31560
31561 /* To indicate add/delete/modify during CCN */
31562 u8 change_detected;
31563 diff -urNp linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h
31564 --- linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
31565 +++ linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
31566 @@ -2244,7 +2244,7 @@ struct isp_operations {
31567 int (*get_flash_version) (struct scsi_qla_host *, void *);
31568 int (*start_scsi) (srb_t *);
31569 int (*abort_isp) (struct scsi_qla_host *);
31570 -};
31571 +} __no_const;
31572
31573 /* MSI-X Support *************************************************************/
31574
31575 diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h
31576 --- linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
31577 +++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
31578 @@ -256,7 +256,7 @@ struct ddb_entry {
31579 atomic_t retry_relogin_timer; /* Min Time between relogins
31580 * (4000 only) */
31581 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31582 - atomic_t relogin_retry_count; /* Num of times relogin has been
31583 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31584 * retried */
31585
31586 uint16_t port;
31587 diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c
31588 --- linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
31589 +++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
31590 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31591 ddb_entry->fw_ddb_index = fw_ddb_index;
31592 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31593 atomic_set(&ddb_entry->relogin_timer, 0);
31594 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31595 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31596 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31597 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31598 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31599 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31600 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31601 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31602 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31603 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31604 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31605 atomic_set(&ddb_entry->relogin_timer, 0);
31606 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31607 iscsi_unblock_session(ddb_entry->sess);
31608 diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c
31609 --- linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
31610 +++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
31611 @@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
31612 ddb_entry->fw_ddb_device_state ==
31613 DDB_DS_SESSION_FAILED) {
31614 /* Reset retry relogin timer */
31615 - atomic_inc(&ddb_entry->relogin_retry_count);
31616 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31617 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31618 " timed out-retrying"
31619 " relogin (%d)\n",
31620 ha->host_no,
31621 ddb_entry->fw_ddb_index,
31622 - atomic_read(&ddb_entry->
31623 + atomic_read_unchecked(&ddb_entry->
31624 relogin_retry_count))
31625 );
31626 start_dpc++;
31627 diff -urNp linux-3.0.4/drivers/scsi/scsi.c linux-3.0.4/drivers/scsi/scsi.c
31628 --- linux-3.0.4/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
31629 +++ linux-3.0.4/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
31630 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31631 unsigned long timeout;
31632 int rtn = 0;
31633
31634 - atomic_inc(&cmd->device->iorequest_cnt);
31635 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31636
31637 /* check if the device is still usable */
31638 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31639 diff -urNp linux-3.0.4/drivers/scsi/scsi_debug.c linux-3.0.4/drivers/scsi/scsi_debug.c
31640 --- linux-3.0.4/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
31641 +++ linux-3.0.4/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
31642 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31643 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31644 unsigned char *cmd = (unsigned char *)scp->cmnd;
31645
31646 + pax_track_stack();
31647 +
31648 if ((errsts = check_readiness(scp, 1, devip)))
31649 return errsts;
31650 memset(arr, 0, sizeof(arr));
31651 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31652 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31653 unsigned char *cmd = (unsigned char *)scp->cmnd;
31654
31655 + pax_track_stack();
31656 +
31657 if ((errsts = check_readiness(scp, 1, devip)))
31658 return errsts;
31659 memset(arr, 0, sizeof(arr));
31660 diff -urNp linux-3.0.4/drivers/scsi/scsi_lib.c linux-3.0.4/drivers/scsi/scsi_lib.c
31661 --- linux-3.0.4/drivers/scsi/scsi_lib.c 2011-08-23 21:44:40.000000000 -0400
31662 +++ linux-3.0.4/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
31663 @@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
31664 shost = sdev->host;
31665 scsi_init_cmd_errh(cmd);
31666 cmd->result = DID_NO_CONNECT << 16;
31667 - atomic_inc(&cmd->device->iorequest_cnt);
31668 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31669
31670 /*
31671 * SCSI request completion path will do scsi_device_unbusy(),
31672 @@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
31673
31674 INIT_LIST_HEAD(&cmd->eh_entry);
31675
31676 - atomic_inc(&cmd->device->iodone_cnt);
31677 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
31678 if (cmd->result)
31679 - atomic_inc(&cmd->device->ioerr_cnt);
31680 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31681
31682 disposition = scsi_decide_disposition(cmd);
31683 if (disposition != SUCCESS &&
31684 diff -urNp linux-3.0.4/drivers/scsi/scsi_sysfs.c linux-3.0.4/drivers/scsi/scsi_sysfs.c
31685 --- linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
31686 +++ linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
31687 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31688 char *buf) \
31689 { \
31690 struct scsi_device *sdev = to_scsi_device(dev); \
31691 - unsigned long long count = atomic_read(&sdev->field); \
31692 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
31693 return snprintf(buf, 20, "0x%llx\n", count); \
31694 } \
31695 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31696 diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_fc.c linux-3.0.4/drivers/scsi/scsi_transport_fc.c
31697 --- linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
31698 +++ linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
31699 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31700 * Netlink Infrastructure
31701 */
31702
31703 -static atomic_t fc_event_seq;
31704 +static atomic_unchecked_t fc_event_seq;
31705
31706 /**
31707 * fc_get_event_number - Obtain the next sequential FC event number
31708 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
31709 u32
31710 fc_get_event_number(void)
31711 {
31712 - return atomic_add_return(1, &fc_event_seq);
31713 + return atomic_add_return_unchecked(1, &fc_event_seq);
31714 }
31715 EXPORT_SYMBOL(fc_get_event_number);
31716
31717 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void
31718 {
31719 int error;
31720
31721 - atomic_set(&fc_event_seq, 0);
31722 + atomic_set_unchecked(&fc_event_seq, 0);
31723
31724 error = transport_class_register(&fc_host_class);
31725 if (error)
31726 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
31727 char *cp;
31728
31729 *val = simple_strtoul(buf, &cp, 0);
31730 - if ((*cp && (*cp != '\n')) || (*val < 0))
31731 + if (*cp && (*cp != '\n'))
31732 return -EINVAL;
31733 /*
31734 * Check for overflow; dev_loss_tmo is u32
31735 diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c
31736 --- linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
31737 +++ linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
31738 @@ -83,7 +83,7 @@ struct iscsi_internal {
31739 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31740 };
31741
31742 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31743 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31744 static struct workqueue_struct *iscsi_eh_timer_workq;
31745
31746 /*
31747 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31748 int err;
31749
31750 ihost = shost->shost_data;
31751 - session->sid = atomic_add_return(1, &iscsi_session_nr);
31752 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31753
31754 if (id == ISCSI_MAX_TARGET) {
31755 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31756 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31757 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31758 ISCSI_TRANSPORT_VERSION);
31759
31760 - atomic_set(&iscsi_session_nr, 0);
31761 + atomic_set_unchecked(&iscsi_session_nr, 0);
31762
31763 err = class_register(&iscsi_transport_class);
31764 if (err)
31765 diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_srp.c linux-3.0.4/drivers/scsi/scsi_transport_srp.c
31766 --- linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
31767 +++ linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
31768 @@ -33,7 +33,7 @@
31769 #include "scsi_transport_srp_internal.h"
31770
31771 struct srp_host_attrs {
31772 - atomic_t next_port_id;
31773 + atomic_unchecked_t next_port_id;
31774 };
31775 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31776
31777 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31778 struct Scsi_Host *shost = dev_to_shost(dev);
31779 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31780
31781 - atomic_set(&srp_host->next_port_id, 0);
31782 + atomic_set_unchecked(&srp_host->next_port_id, 0);
31783 return 0;
31784 }
31785
31786 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31787 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31788 rport->roles = ids->roles;
31789
31790 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31791 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31792 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31793
31794 transport_setup_device(&rport->dev);
31795 diff -urNp linux-3.0.4/drivers/scsi/sg.c linux-3.0.4/drivers/scsi/sg.c
31796 --- linux-3.0.4/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
31797 +++ linux-3.0.4/drivers/scsi/sg.c 2011-08-23 21:47:56.000000000 -0400
31798 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31799 const struct file_operations * fops;
31800 };
31801
31802 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31803 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31804 {"allow_dio", &adio_fops},
31805 {"debug", &debug_fops},
31806 {"def_reserved_size", &dressz_fops},
31807 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
31808 {
31809 int k, mask;
31810 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31811 - struct sg_proc_leaf * leaf;
31812 + const struct sg_proc_leaf * leaf;
31813
31814 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31815 if (!sg_proc_sgp)
31816 diff -urNp linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c
31817 --- linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
31818 +++ linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
31819 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31820 int do_iounmap = 0;
31821 int do_disable_device = 1;
31822
31823 + pax_track_stack();
31824 +
31825 memset(&sym_dev, 0, sizeof(sym_dev));
31826 memset(&nvram, 0, sizeof(nvram));
31827 sym_dev.pdev = pdev;
31828 diff -urNp linux-3.0.4/drivers/scsi/vmw_pvscsi.c linux-3.0.4/drivers/scsi/vmw_pvscsi.c
31829 --- linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
31830 +++ linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
31831 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31832 dma_addr_t base;
31833 unsigned i;
31834
31835 + pax_track_stack();
31836 +
31837 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31838 cmd.reqRingNumPages = adapter->req_pages;
31839 cmd.cmpRingNumPages = adapter->cmp_pages;
31840 diff -urNp linux-3.0.4/drivers/spi/spi.c linux-3.0.4/drivers/spi/spi.c
31841 --- linux-3.0.4/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
31842 +++ linux-3.0.4/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
31843 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31844 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31845
31846 /* portable code must never pass more than 32 bytes */
31847 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
31848 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
31849
31850 static u8 *buf;
31851
31852 diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
31853 --- linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:44:40.000000000 -0400
31854 +++ linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
31855 @@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
31856 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
31857
31858
31859 -static struct net_device_ops ar6000_netdev_ops = {
31860 +static net_device_ops_no_const ar6000_netdev_ops = {
31861 .ndo_init = NULL,
31862 .ndo_open = ar6000_open,
31863 .ndo_stop = ar6000_close,
31864 diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
31865 --- linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
31866 +++ linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
31867 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
31868 typedef struct ar6k_pal_config_s
31869 {
31870 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
31871 -}ar6k_pal_config_t;
31872 +} __no_const ar6k_pal_config_t;
31873
31874 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
31875 #endif /* _AR6K_PAL_H_ */
31876 diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
31877 --- linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
31878 +++ linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
31879 @@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
31880 free_netdev(ifp->net);
31881 }
31882 /* Allocate etherdev, including space for private structure */
31883 - ifp->net = alloc_etherdev(sizeof(dhd));
31884 + ifp->net = alloc_etherdev(sizeof(*dhd));
31885 if (!ifp->net) {
31886 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31887 ret = -ENOMEM;
31888 }
31889 if (ret == 0) {
31890 strcpy(ifp->net->name, ifp->name);
31891 - memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
31892 + memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
31893 err = dhd_net_attach(&dhd->pub, ifp->idx);
31894 if (err != 0) {
31895 DHD_ERROR(("%s: dhd_net_attach failed, "
31896 @@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31897 strcpy(nv_path, nvram_path);
31898
31899 /* Allocate etherdev, including space for private structure */
31900 - net = alloc_etherdev(sizeof(dhd));
31901 + net = alloc_etherdev(sizeof(*dhd));
31902 if (!net) {
31903 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31904 goto fail;
31905 @@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31906 /*
31907 * Save the dhd_info into the priv
31908 */
31909 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31910 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31911
31912 /* Set network interface name if it was provided as module parameter */
31913 if (iface_name[0]) {
31914 @@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31915 /*
31916 * Save the dhd_info into the priv
31917 */
31918 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31919 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31920
31921 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
31922 g_bus = bus;
31923 diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
31924 --- linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
31925 +++ linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
31926 @@ -593,7 +593,7 @@ struct phy_func_ptr {
31927 initfn_t carrsuppr;
31928 rxsigpwrfn_t rxsigpwr;
31929 detachfn_t detach;
31930 -};
31931 +} __no_const;
31932 typedef struct phy_func_ptr phy_func_ptr_t;
31933
31934 struct phy_info {
31935 diff -urNp linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h
31936 --- linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
31937 +++ linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
31938 @@ -185,7 +185,7 @@ typedef struct {
31939 u16 func, uint bustype, void *regsva, void *param);
31940 /* detach from device */
31941 void (*detach) (void *ch);
31942 -} bcmsdh_driver_t;
31943 +} __no_const bcmsdh_driver_t;
31944
31945 /* platform specific/high level functions */
31946 extern int bcmsdh_register(bcmsdh_driver_t *driver);
31947 diff -urNp linux-3.0.4/drivers/staging/et131x/et1310_tx.c linux-3.0.4/drivers/staging/et131x/et1310_tx.c
31948 --- linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
31949 +++ linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
31950 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
31951 struct net_device_stats *stats = &etdev->net_stats;
31952
31953 if (tcb->flags & fMP_DEST_BROAD)
31954 - atomic_inc(&etdev->Stats.brdcstxmt);
31955 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
31956 else if (tcb->flags & fMP_DEST_MULTI)
31957 - atomic_inc(&etdev->Stats.multixmt);
31958 + atomic_inc_unchecked(&etdev->Stats.multixmt);
31959 else
31960 - atomic_inc(&etdev->Stats.unixmt);
31961 + atomic_inc_unchecked(&etdev->Stats.unixmt);
31962
31963 if (tcb->skb) {
31964 stats->tx_bytes += tcb->skb->len;
31965 diff -urNp linux-3.0.4/drivers/staging/et131x/et131x_adapter.h linux-3.0.4/drivers/staging/et131x/et131x_adapter.h
31966 --- linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
31967 +++ linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
31968 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
31969 * operations
31970 */
31971 u32 unircv; /* # multicast packets received */
31972 - atomic_t unixmt; /* # multicast packets for Tx */
31973 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
31974 u32 multircv; /* # multicast packets received */
31975 - atomic_t multixmt; /* # multicast packets for Tx */
31976 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
31977 u32 brdcstrcv; /* # broadcast packets received */
31978 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
31979 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
31980 u32 norcvbuf; /* # Rx packets discarded */
31981 u32 noxmtbuf; /* # Tx packets discarded */
31982
31983 diff -urNp linux-3.0.4/drivers/staging/hv/channel.c linux-3.0.4/drivers/staging/hv/channel.c
31984 --- linux-3.0.4/drivers/staging/hv/channel.c 2011-08-23 21:44:40.000000000 -0400
31985 +++ linux-3.0.4/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
31986 @@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
31987 int ret = 0;
31988 int t;
31989
31990 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31991 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31992 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31993 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31994
31995 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31996 if (ret)
31997 diff -urNp linux-3.0.4/drivers/staging/hv/hv.c linux-3.0.4/drivers/staging/hv/hv.c
31998 --- linux-3.0.4/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
31999 +++ linux-3.0.4/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
32000 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
32001 u64 output_address = (output) ? virt_to_phys(output) : 0;
32002 u32 output_address_hi = output_address >> 32;
32003 u32 output_address_lo = output_address & 0xFFFFFFFF;
32004 - volatile void *hypercall_page = hv_context.hypercall_page;
32005 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32006
32007 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
32008 "=a"(hv_status_lo) : "d" (control_hi),
32009 diff -urNp linux-3.0.4/drivers/staging/hv/hv_mouse.c linux-3.0.4/drivers/staging/hv/hv_mouse.c
32010 --- linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
32011 +++ linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
32012 @@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
32013 if (hid_dev) {
32014 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32015
32016 - hid_dev->ll_driver->open = mousevsc_hid_open;
32017 - hid_dev->ll_driver->close = mousevsc_hid_close;
32018 + pax_open_kernel();
32019 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32020 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32021 + pax_close_kernel();
32022
32023 hid_dev->bus = BUS_VIRTUAL;
32024 hid_dev->vendor = input_device_ctx->device_info.vendor;
32025 diff -urNp linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h
32026 --- linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
32027 +++ linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
32028 @@ -559,7 +559,7 @@ enum vmbus_connect_state {
32029 struct vmbus_connection {
32030 enum vmbus_connect_state conn_state;
32031
32032 - atomic_t next_gpadl_handle;
32033 + atomic_unchecked_t next_gpadl_handle;
32034
32035 /*
32036 * Represents channel interrupts. Each bit position represents a
32037 diff -urNp linux-3.0.4/drivers/staging/hv/rndis_filter.c linux-3.0.4/drivers/staging/hv/rndis_filter.c
32038 --- linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-08-23 21:44:40.000000000 -0400
32039 +++ linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
32040 @@ -43,7 +43,7 @@ struct rndis_device {
32041
32042 enum rndis_device_state state;
32043 u32 link_stat;
32044 - atomic_t new_req_id;
32045 + atomic_unchecked_t new_req_id;
32046
32047 spinlock_t request_lock;
32048 struct list_head req_list;
32049 @@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
32050 * template
32051 */
32052 set = &rndis_msg->msg.set_req;
32053 - set->req_id = atomic_inc_return(&dev->new_req_id);
32054 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32055
32056 /* Add to the request list */
32057 spin_lock_irqsave(&dev->request_lock, flags);
32058 @@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
32059
32060 /* Setup the rndis set */
32061 halt = &request->request_msg.msg.halt_req;
32062 - halt->req_id = atomic_inc_return(&dev->new_req_id);
32063 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32064
32065 /* Ignore return since this msg is optional. */
32066 rndis_filter_send_request(dev, request);
32067 diff -urNp linux-3.0.4/drivers/staging/hv/vmbus_drv.c linux-3.0.4/drivers/staging/hv/vmbus_drv.c
32068 --- linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
32069 +++ linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
32070 @@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
32071 {
32072 int ret = 0;
32073
32074 - static atomic_t device_num = ATOMIC_INIT(0);
32075 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32076
32077 /* Set the device name. Otherwise, device_register() will fail. */
32078 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32079 - atomic_inc_return(&device_num));
32080 + atomic_inc_return_unchecked(&device_num));
32081
32082 /* The new device belongs to this bus */
32083 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
32084 diff -urNp linux-3.0.4/drivers/staging/iio/ring_generic.h linux-3.0.4/drivers/staging/iio/ring_generic.h
32085 --- linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
32086 +++ linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
32087 @@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
32088
32089 int (*is_enabled)(struct iio_ring_buffer *ring);
32090 int (*enable)(struct iio_ring_buffer *ring);
32091 -};
32092 +} __no_const;
32093
32094 struct iio_ring_setup_ops {
32095 int (*preenable)(struct iio_dev *);
32096 diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet.c linux-3.0.4/drivers/staging/octeon/ethernet.c
32097 --- linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
32098 +++ linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
32099 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32100 * since the RX tasklet also increments it.
32101 */
32102 #ifdef CONFIG_64BIT
32103 - atomic64_add(rx_status.dropped_packets,
32104 - (atomic64_t *)&priv->stats.rx_dropped);
32105 + atomic64_add_unchecked(rx_status.dropped_packets,
32106 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32107 #else
32108 - atomic_add(rx_status.dropped_packets,
32109 - (atomic_t *)&priv->stats.rx_dropped);
32110 + atomic_add_unchecked(rx_status.dropped_packets,
32111 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
32112 #endif
32113 }
32114
32115 diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet-rx.c linux-3.0.4/drivers/staging/octeon/ethernet-rx.c
32116 --- linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
32117 +++ linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
32118 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32119 /* Increment RX stats for virtual ports */
32120 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32121 #ifdef CONFIG_64BIT
32122 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32123 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32124 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32125 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32126 #else
32127 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32128 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32129 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32130 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32131 #endif
32132 }
32133 netif_receive_skb(skb);
32134 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32135 dev->name);
32136 */
32137 #ifdef CONFIG_64BIT
32138 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32139 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32140 #else
32141 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32142 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32143 #endif
32144 dev_kfree_skb_irq(skb);
32145 }
32146 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/inode.c linux-3.0.4/drivers/staging/pohmelfs/inode.c
32147 --- linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
32148 +++ linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
32149 @@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
32150 mutex_init(&psb->mcache_lock);
32151 psb->mcache_root = RB_ROOT;
32152 psb->mcache_timeout = msecs_to_jiffies(5000);
32153 - atomic_long_set(&psb->mcache_gen, 0);
32154 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
32155
32156 psb->trans_max_pages = 100;
32157
32158 @@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
32159 INIT_LIST_HEAD(&psb->crypto_ready_list);
32160 INIT_LIST_HEAD(&psb->crypto_active_list);
32161
32162 - atomic_set(&psb->trans_gen, 1);
32163 + atomic_set_unchecked(&psb->trans_gen, 1);
32164 atomic_long_set(&psb->total_inodes, 0);
32165
32166 mutex_init(&psb->state_lock);
32167 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/mcache.c linux-3.0.4/drivers/staging/pohmelfs/mcache.c
32168 --- linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
32169 +++ linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
32170 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32171 m->data = data;
32172 m->start = start;
32173 m->size = size;
32174 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
32175 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32176
32177 mutex_lock(&psb->mcache_lock);
32178 err = pohmelfs_mcache_insert(psb, m);
32179 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/netfs.h linux-3.0.4/drivers/staging/pohmelfs/netfs.h
32180 --- linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
32181 +++ linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
32182 @@ -571,14 +571,14 @@ struct pohmelfs_config;
32183 struct pohmelfs_sb {
32184 struct rb_root mcache_root;
32185 struct mutex mcache_lock;
32186 - atomic_long_t mcache_gen;
32187 + atomic_long_unchecked_t mcache_gen;
32188 unsigned long mcache_timeout;
32189
32190 unsigned int idx;
32191
32192 unsigned int trans_retries;
32193
32194 - atomic_t trans_gen;
32195 + atomic_unchecked_t trans_gen;
32196
32197 unsigned int crypto_attached_size;
32198 unsigned int crypto_align_size;
32199 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/trans.c linux-3.0.4/drivers/staging/pohmelfs/trans.c
32200 --- linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
32201 +++ linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
32202 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32203 int err;
32204 struct netfs_cmd *cmd = t->iovec.iov_base;
32205
32206 - t->gen = atomic_inc_return(&psb->trans_gen);
32207 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32208
32209 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32210 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32211 diff -urNp linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h
32212 --- linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
32213 +++ linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
32214 @@ -83,7 +83,7 @@ struct _io_ops {
32215 u8 *pmem);
32216 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32217 u8 *pmem);
32218 -};
32219 +} __no_const;
32220
32221 struct io_req {
32222 struct list_head list;
32223 diff -urNp linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c
32224 --- linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
32225 +++ linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
32226 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32227 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32228
32229 if (rlen)
32230 - if (copy_to_user(data, &resp, rlen))
32231 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32232 return -EFAULT;
32233
32234 return 0;
32235 diff -urNp linux-3.0.4/drivers/staging/tty/stallion.c linux-3.0.4/drivers/staging/tty/stallion.c
32236 --- linux-3.0.4/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
32237 +++ linux-3.0.4/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
32238 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32239 struct stlport stl_dummyport;
32240 struct stlport *portp;
32241
32242 + pax_track_stack();
32243 +
32244 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32245 return -EFAULT;
32246 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32247 diff -urNp linux-3.0.4/drivers/staging/usbip/usbip_common.h linux-3.0.4/drivers/staging/usbip/usbip_common.h
32248 --- linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
32249 +++ linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
32250 @@ -315,7 +315,7 @@ struct usbip_device {
32251 void (*shutdown)(struct usbip_device *);
32252 void (*reset)(struct usbip_device *);
32253 void (*unusable)(struct usbip_device *);
32254 - } eh_ops;
32255 + } __no_const eh_ops;
32256 };
32257
32258 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
32259 diff -urNp linux-3.0.4/drivers/staging/usbip/vhci.h linux-3.0.4/drivers/staging/usbip/vhci.h
32260 --- linux-3.0.4/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
32261 +++ linux-3.0.4/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
32262 @@ -94,7 +94,7 @@ struct vhci_hcd {
32263 unsigned resuming:1;
32264 unsigned long re_timeout;
32265
32266 - atomic_t seqnum;
32267 + atomic_unchecked_t seqnum;
32268
32269 /*
32270 * NOTE:
32271 diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_hcd.c linux-3.0.4/drivers/staging/usbip/vhci_hcd.c
32272 --- linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:44:40.000000000 -0400
32273 +++ linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
32274 @@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
32275 return;
32276 }
32277
32278 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32279 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32280 if (priv->seqnum == 0xffff)
32281 dev_info(&urb->dev->dev, "seqnum max\n");
32282
32283 @@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
32284 return -ENOMEM;
32285 }
32286
32287 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32288 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32289 if (unlink->seqnum == 0xffff)
32290 pr_info("seqnum max\n");
32291
32292 @@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
32293 vdev->rhport = rhport;
32294 }
32295
32296 - atomic_set(&vhci->seqnum, 0);
32297 + atomic_set_unchecked(&vhci->seqnum, 0);
32298 spin_lock_init(&vhci->lock);
32299
32300 hcd->power_budget = 0; /* no limit */
32301 diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_rx.c linux-3.0.4/drivers/staging/usbip/vhci_rx.c
32302 --- linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
32303 +++ linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
32304 @@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
32305 if (!urb) {
32306 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
32307 pr_info("max seqnum %d\n",
32308 - atomic_read(&the_controller->seqnum));
32309 + atomic_read_unchecked(&the_controller->seqnum));
32310 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32311 return;
32312 }
32313 diff -urNp linux-3.0.4/drivers/staging/vt6655/hostap.c linux-3.0.4/drivers/staging/vt6655/hostap.c
32314 --- linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
32315 +++ linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
32316 @@ -79,14 +79,13 @@ static int msglevel
32317 *
32318 */
32319
32320 +static net_device_ops_no_const apdev_netdev_ops;
32321 +
32322 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32323 {
32324 PSDevice apdev_priv;
32325 struct net_device *dev = pDevice->dev;
32326 int ret;
32327 - const struct net_device_ops apdev_netdev_ops = {
32328 - .ndo_start_xmit = pDevice->tx_80211,
32329 - };
32330
32331 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32332
32333 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
32334 *apdev_priv = *pDevice;
32335 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32336
32337 + /* only half broken now */
32338 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32339 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32340
32341 pDevice->apdev->type = ARPHRD_IEEE80211;
32342 diff -urNp linux-3.0.4/drivers/staging/vt6656/hostap.c linux-3.0.4/drivers/staging/vt6656/hostap.c
32343 --- linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
32344 +++ linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
32345 @@ -80,14 +80,13 @@ static int msglevel
32346 *
32347 */
32348
32349 +static net_device_ops_no_const apdev_netdev_ops;
32350 +
32351 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32352 {
32353 PSDevice apdev_priv;
32354 struct net_device *dev = pDevice->dev;
32355 int ret;
32356 - const struct net_device_ops apdev_netdev_ops = {
32357 - .ndo_start_xmit = pDevice->tx_80211,
32358 - };
32359
32360 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32361
32362 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
32363 *apdev_priv = *pDevice;
32364 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32365
32366 + /* only half broken now */
32367 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32368 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32369
32370 pDevice->apdev->type = ARPHRD_IEEE80211;
32371 diff -urNp linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c
32372 --- linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
32373 +++ linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
32374 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32375
32376 struct usbctlx_completor {
32377 int (*complete) (struct usbctlx_completor *);
32378 -};
32379 +} __no_const;
32380
32381 static int
32382 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32383 diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.c linux-3.0.4/drivers/staging/zcache/tmem.c
32384 --- linux-3.0.4/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
32385 +++ linux-3.0.4/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
32386 @@ -39,7 +39,7 @@
32387 * A tmem host implementation must use this function to register callbacks
32388 * for memory allocation.
32389 */
32390 -static struct tmem_hostops tmem_hostops;
32391 +static tmem_hostops_no_const tmem_hostops;
32392
32393 static void tmem_objnode_tree_init(void);
32394
32395 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
32396 * A tmem host implementation must use this function to register
32397 * callbacks for a page-accessible memory (PAM) implementation
32398 */
32399 -static struct tmem_pamops tmem_pamops;
32400 +static tmem_pamops_no_const tmem_pamops;
32401
32402 void tmem_register_pamops(struct tmem_pamops *m)
32403 {
32404 diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.h linux-3.0.4/drivers/staging/zcache/tmem.h
32405 --- linux-3.0.4/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
32406 +++ linux-3.0.4/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
32407 @@ -171,6 +171,7 @@ struct tmem_pamops {
32408 int (*get_data)(struct page *, void *, struct tmem_pool *);
32409 void (*free)(void *, struct tmem_pool *);
32410 };
32411 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
32412 extern void tmem_register_pamops(struct tmem_pamops *m);
32413
32414 /* memory allocation methods provided by the host implementation */
32415 @@ -180,6 +181,7 @@ struct tmem_hostops {
32416 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
32417 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
32418 };
32419 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
32420 extern void tmem_register_hostops(struct tmem_hostops *m);
32421
32422 /* core tmem accessor functions */
32423 diff -urNp linux-3.0.4/drivers/target/target_core_alua.c linux-3.0.4/drivers/target/target_core_alua.c
32424 --- linux-3.0.4/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
32425 +++ linux-3.0.4/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
32426 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32427 char path[ALUA_METADATA_PATH_LEN];
32428 int len;
32429
32430 + pax_track_stack();
32431 +
32432 memset(path, 0, ALUA_METADATA_PATH_LEN);
32433
32434 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32435 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32436 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32437 int len;
32438
32439 + pax_track_stack();
32440 +
32441 memset(path, 0, ALUA_METADATA_PATH_LEN);
32442 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32443
32444 diff -urNp linux-3.0.4/drivers/target/target_core_cdb.c linux-3.0.4/drivers/target/target_core_cdb.c
32445 --- linux-3.0.4/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
32446 +++ linux-3.0.4/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
32447 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32448 int length = 0;
32449 unsigned char buf[SE_MODE_PAGE_BUF];
32450
32451 + pax_track_stack();
32452 +
32453 memset(buf, 0, SE_MODE_PAGE_BUF);
32454
32455 switch (cdb[2] & 0x3f) {
32456 diff -urNp linux-3.0.4/drivers/target/target_core_configfs.c linux-3.0.4/drivers/target/target_core_configfs.c
32457 --- linux-3.0.4/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
32458 +++ linux-3.0.4/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
32459 @@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
32460 ssize_t len = 0;
32461 int reg_count = 0, prf_isid;
32462
32463 + pax_track_stack();
32464 +
32465 if (!(su_dev->se_dev_ptr))
32466 return -ENODEV;
32467
32468 diff -urNp linux-3.0.4/drivers/target/target_core_pr.c linux-3.0.4/drivers/target/target_core_pr.c
32469 --- linux-3.0.4/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
32470 +++ linux-3.0.4/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
32471 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32472 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32473 u16 tpgt;
32474
32475 + pax_track_stack();
32476 +
32477 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32478 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32479 /*
32480 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32481 ssize_t len = 0;
32482 int reg_count = 0;
32483
32484 + pax_track_stack();
32485 +
32486 memset(buf, 0, pr_aptpl_buf_len);
32487 /*
32488 * Called to clear metadata once APTPL has been deactivated.
32489 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32490 char path[512];
32491 int ret;
32492
32493 + pax_track_stack();
32494 +
32495 memset(iov, 0, sizeof(struct iovec));
32496 memset(path, 0, 512);
32497
32498 diff -urNp linux-3.0.4/drivers/target/target_core_tmr.c linux-3.0.4/drivers/target/target_core_tmr.c
32499 --- linux-3.0.4/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
32500 +++ linux-3.0.4/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
32501 @@ -269,7 +269,7 @@ int core_tmr_lun_reset(
32502 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32503 T_TASK(cmd)->t_task_cdbs,
32504 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32505 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32506 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32507 atomic_read(&T_TASK(cmd)->t_transport_active),
32508 atomic_read(&T_TASK(cmd)->t_transport_stop),
32509 atomic_read(&T_TASK(cmd)->t_transport_sent));
32510 @@ -311,7 +311,7 @@ int core_tmr_lun_reset(
32511 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32512 " task: %p, t_fe_count: %d dev: %p\n", task,
32513 fe_count, dev);
32514 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32515 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32516 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32517 flags);
32518 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32519 @@ -321,7 +321,7 @@ int core_tmr_lun_reset(
32520 }
32521 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32522 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32523 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32524 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32525 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32526 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32527
32528 diff -urNp linux-3.0.4/drivers/target/target_core_transport.c linux-3.0.4/drivers/target/target_core_transport.c
32529 --- linux-3.0.4/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
32530 +++ linux-3.0.4/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
32531 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32532
32533 dev->queue_depth = dev_limits->queue_depth;
32534 atomic_set(&dev->depth_left, dev->queue_depth);
32535 - atomic_set(&dev->dev_ordered_id, 0);
32536 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
32537
32538 se_dev_set_default_attribs(dev, dev_limits);
32539
32540 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32541 * Used to determine when ORDERED commands should go from
32542 * Dormant to Active status.
32543 */
32544 - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32545 + cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32546 smp_mb__after_atomic_inc();
32547 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32548 cmd->se_ordered_id, cmd->sam_task_attr,
32549 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32550 " t_transport_active: %d t_transport_stop: %d"
32551 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32552 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32553 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32554 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32555 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32556 atomic_read(&T_TASK(cmd)->t_transport_active),
32557 atomic_read(&T_TASK(cmd)->t_transport_stop),
32558 @@ -2673,9 +2673,9 @@ check_depth:
32559 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32560 atomic_set(&task->task_active, 1);
32561 atomic_set(&task->task_sent, 1);
32562 - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32563 + atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32564
32565 - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32566 + if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32567 T_TASK(cmd)->t_task_cdbs)
32568 atomic_set(&cmd->transport_sent, 1);
32569
32570 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32571 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32572 }
32573 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32574 - atomic_read(&T_TASK(cmd)->t_transport_aborted))
32575 + atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32576 goto remove;
32577
32578 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32579 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32580 {
32581 int ret = 0;
32582
32583 - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32584 + if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32585 if (!(send_status) ||
32586 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32587 return 1;
32588 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32589 */
32590 if (cmd->data_direction == DMA_TO_DEVICE) {
32591 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32592 - atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32593 + atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32594 smp_mb__after_atomic_inc();
32595 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32596 transport_new_cmd_failure(cmd);
32597 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32598 CMD_TFO(cmd)->get_task_tag(cmd),
32599 T_TASK(cmd)->t_task_cdbs,
32600 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32601 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32602 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32603 atomic_read(&T_TASK(cmd)->t_transport_active),
32604 atomic_read(&T_TASK(cmd)->t_transport_stop),
32605 atomic_read(&T_TASK(cmd)->t_transport_sent));
32606 diff -urNp linux-3.0.4/drivers/telephony/ixj.c linux-3.0.4/drivers/telephony/ixj.c
32607 --- linux-3.0.4/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
32608 +++ linux-3.0.4/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
32609 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32610 bool mContinue;
32611 char *pIn, *pOut;
32612
32613 + pax_track_stack();
32614 +
32615 if (!SCI_Prepare(j))
32616 return 0;
32617
32618 diff -urNp linux-3.0.4/drivers/tty/hvc/hvcs.c linux-3.0.4/drivers/tty/hvc/hvcs.c
32619 --- linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
32620 +++ linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
32621 @@ -83,6 +83,7 @@
32622 #include <asm/hvcserver.h>
32623 #include <asm/uaccess.h>
32624 #include <asm/vio.h>
32625 +#include <asm/local.h>
32626
32627 /*
32628 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32629 @@ -270,7 +271,7 @@ struct hvcs_struct {
32630 unsigned int index;
32631
32632 struct tty_struct *tty;
32633 - int open_count;
32634 + local_t open_count;
32635
32636 /*
32637 * Used to tell the driver kernel_thread what operations need to take
32638 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32639
32640 spin_lock_irqsave(&hvcsd->lock, flags);
32641
32642 - if (hvcsd->open_count > 0) {
32643 + if (local_read(&hvcsd->open_count) > 0) {
32644 spin_unlock_irqrestore(&hvcsd->lock, flags);
32645 printk(KERN_INFO "HVCS: vterm state unchanged. "
32646 "The hvcs device node is still in use.\n");
32647 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32648 if ((retval = hvcs_partner_connect(hvcsd)))
32649 goto error_release;
32650
32651 - hvcsd->open_count = 1;
32652 + local_set(&hvcsd->open_count, 1);
32653 hvcsd->tty = tty;
32654 tty->driver_data = hvcsd;
32655
32656 @@ -1179,7 +1180,7 @@ fast_open:
32657
32658 spin_lock_irqsave(&hvcsd->lock, flags);
32659 kref_get(&hvcsd->kref);
32660 - hvcsd->open_count++;
32661 + local_inc(&hvcsd->open_count);
32662 hvcsd->todo_mask |= HVCS_SCHED_READ;
32663 spin_unlock_irqrestore(&hvcsd->lock, flags);
32664
32665 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32666 hvcsd = tty->driver_data;
32667
32668 spin_lock_irqsave(&hvcsd->lock, flags);
32669 - if (--hvcsd->open_count == 0) {
32670 + if (local_dec_and_test(&hvcsd->open_count)) {
32671
32672 vio_disable_interrupts(hvcsd->vdev);
32673
32674 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32675 free_irq(irq, hvcsd);
32676 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32677 return;
32678 - } else if (hvcsd->open_count < 0) {
32679 + } else if (local_read(&hvcsd->open_count) < 0) {
32680 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32681 " is missmanaged.\n",
32682 - hvcsd->vdev->unit_address, hvcsd->open_count);
32683 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32684 }
32685
32686 spin_unlock_irqrestore(&hvcsd->lock, flags);
32687 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32688
32689 spin_lock_irqsave(&hvcsd->lock, flags);
32690 /* Preserve this so that we know how many kref refs to put */
32691 - temp_open_count = hvcsd->open_count;
32692 + temp_open_count = local_read(&hvcsd->open_count);
32693
32694 /*
32695 * Don't kref put inside the spinlock because the destruction
32696 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32697 hvcsd->tty->driver_data = NULL;
32698 hvcsd->tty = NULL;
32699
32700 - hvcsd->open_count = 0;
32701 + local_set(&hvcsd->open_count, 0);
32702
32703 /* This will drop any buffered data on the floor which is OK in a hangup
32704 * scenario. */
32705 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32706 * the middle of a write operation? This is a crummy place to do this
32707 * but we want to keep it all in the spinlock.
32708 */
32709 - if (hvcsd->open_count <= 0) {
32710 + if (local_read(&hvcsd->open_count) <= 0) {
32711 spin_unlock_irqrestore(&hvcsd->lock, flags);
32712 return -ENODEV;
32713 }
32714 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32715 {
32716 struct hvcs_struct *hvcsd = tty->driver_data;
32717
32718 - if (!hvcsd || hvcsd->open_count <= 0)
32719 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32720 return 0;
32721
32722 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32723 diff -urNp linux-3.0.4/drivers/tty/ipwireless/tty.c linux-3.0.4/drivers/tty/ipwireless/tty.c
32724 --- linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
32725 +++ linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
32726 @@ -29,6 +29,7 @@
32727 #include <linux/tty_driver.h>
32728 #include <linux/tty_flip.h>
32729 #include <linux/uaccess.h>
32730 +#include <asm/local.h>
32731
32732 #include "tty.h"
32733 #include "network.h"
32734 @@ -51,7 +52,7 @@ struct ipw_tty {
32735 int tty_type;
32736 struct ipw_network *network;
32737 struct tty_struct *linux_tty;
32738 - int open_count;
32739 + local_t open_count;
32740 unsigned int control_lines;
32741 struct mutex ipw_tty_mutex;
32742 int tx_bytes_queued;
32743 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32744 mutex_unlock(&tty->ipw_tty_mutex);
32745 return -ENODEV;
32746 }
32747 - if (tty->open_count == 0)
32748 + if (local_read(&tty->open_count) == 0)
32749 tty->tx_bytes_queued = 0;
32750
32751 - tty->open_count++;
32752 + local_inc(&tty->open_count);
32753
32754 tty->linux_tty = linux_tty;
32755 linux_tty->driver_data = tty;
32756 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32757
32758 static void do_ipw_close(struct ipw_tty *tty)
32759 {
32760 - tty->open_count--;
32761 -
32762 - if (tty->open_count == 0) {
32763 + if (local_dec_return(&tty->open_count) == 0) {
32764 struct tty_struct *linux_tty = tty->linux_tty;
32765
32766 if (linux_tty != NULL) {
32767 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32768 return;
32769
32770 mutex_lock(&tty->ipw_tty_mutex);
32771 - if (tty->open_count == 0) {
32772 + if (local_read(&tty->open_count) == 0) {
32773 mutex_unlock(&tty->ipw_tty_mutex);
32774 return;
32775 }
32776 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32777 return;
32778 }
32779
32780 - if (!tty->open_count) {
32781 + if (!local_read(&tty->open_count)) {
32782 mutex_unlock(&tty->ipw_tty_mutex);
32783 return;
32784 }
32785 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32786 return -ENODEV;
32787
32788 mutex_lock(&tty->ipw_tty_mutex);
32789 - if (!tty->open_count) {
32790 + if (!local_read(&tty->open_count)) {
32791 mutex_unlock(&tty->ipw_tty_mutex);
32792 return -EINVAL;
32793 }
32794 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32795 if (!tty)
32796 return -ENODEV;
32797
32798 - if (!tty->open_count)
32799 + if (!local_read(&tty->open_count))
32800 return -EINVAL;
32801
32802 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32803 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32804 if (!tty)
32805 return 0;
32806
32807 - if (!tty->open_count)
32808 + if (!local_read(&tty->open_count))
32809 return 0;
32810
32811 return tty->tx_bytes_queued;
32812 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32813 if (!tty)
32814 return -ENODEV;
32815
32816 - if (!tty->open_count)
32817 + if (!local_read(&tty->open_count))
32818 return -EINVAL;
32819
32820 return get_control_lines(tty);
32821 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32822 if (!tty)
32823 return -ENODEV;
32824
32825 - if (!tty->open_count)
32826 + if (!local_read(&tty->open_count))
32827 return -EINVAL;
32828
32829 return set_control_lines(tty, set, clear);
32830 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32831 if (!tty)
32832 return -ENODEV;
32833
32834 - if (!tty->open_count)
32835 + if (!local_read(&tty->open_count))
32836 return -EINVAL;
32837
32838 /* FIXME: Exactly how is the tty object locked here .. */
32839 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32840 against a parallel ioctl etc */
32841 mutex_lock(&ttyj->ipw_tty_mutex);
32842 }
32843 - while (ttyj->open_count)
32844 + while (local_read(&ttyj->open_count))
32845 do_ipw_close(ttyj);
32846 ipwireless_disassociate_network_ttys(network,
32847 ttyj->channel_idx);
32848 diff -urNp linux-3.0.4/drivers/tty/n_gsm.c linux-3.0.4/drivers/tty/n_gsm.c
32849 --- linux-3.0.4/drivers/tty/n_gsm.c 2011-08-23 21:44:40.000000000 -0400
32850 +++ linux-3.0.4/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
32851 @@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32852 return NULL;
32853 spin_lock_init(&dlci->lock);
32854 dlci->fifo = &dlci->_fifo;
32855 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32856 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32857 kfree(dlci);
32858 return NULL;
32859 }
32860 diff -urNp linux-3.0.4/drivers/tty/n_tty.c linux-3.0.4/drivers/tty/n_tty.c
32861 --- linux-3.0.4/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
32862 +++ linux-3.0.4/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
32863 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32864 {
32865 *ops = tty_ldisc_N_TTY;
32866 ops->owner = NULL;
32867 - ops->refcount = ops->flags = 0;
32868 + atomic_set(&ops->refcount, 0);
32869 + ops->flags = 0;
32870 }
32871 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32872 diff -urNp linux-3.0.4/drivers/tty/pty.c linux-3.0.4/drivers/tty/pty.c
32873 --- linux-3.0.4/drivers/tty/pty.c 2011-07-21 22:17:23.000000000 -0400
32874 +++ linux-3.0.4/drivers/tty/pty.c 2011-08-23 21:47:56.000000000 -0400
32875 @@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
32876 register_sysctl_table(pty_root_table);
32877
32878 /* Now create the /dev/ptmx special device */
32879 + pax_open_kernel();
32880 tty_default_fops(&ptmx_fops);
32881 - ptmx_fops.open = ptmx_open;
32882 + *(void **)&ptmx_fops.open = ptmx_open;
32883 + pax_close_kernel();
32884
32885 cdev_init(&ptmx_cdev, &ptmx_fops);
32886 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32887 diff -urNp linux-3.0.4/drivers/tty/rocket.c linux-3.0.4/drivers/tty/rocket.c
32888 --- linux-3.0.4/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
32889 +++ linux-3.0.4/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
32890 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32891 struct rocket_ports tmp;
32892 int board;
32893
32894 + pax_track_stack();
32895 +
32896 if (!retports)
32897 return -EFAULT;
32898 memset(&tmp, 0, sizeof (tmp));
32899 diff -urNp linux-3.0.4/drivers/tty/serial/kgdboc.c linux-3.0.4/drivers/tty/serial/kgdboc.c
32900 --- linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
32901 +++ linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
32902 @@ -23,8 +23,9 @@
32903 #define MAX_CONFIG_LEN 40
32904
32905 static struct kgdb_io kgdboc_io_ops;
32906 +static struct kgdb_io kgdboc_io_ops_console;
32907
32908 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
32909 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
32910 static int configured = -1;
32911
32912 static char config[MAX_CONFIG_LEN];
32913 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
32914 kgdboc_unregister_kbd();
32915 if (configured == 1)
32916 kgdb_unregister_io_module(&kgdboc_io_ops);
32917 + else if (configured == 2)
32918 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
32919 }
32920
32921 static int configure_kgdboc(void)
32922 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
32923 int err;
32924 char *cptr = config;
32925 struct console *cons;
32926 + int is_console = 0;
32927
32928 err = kgdboc_option_setup(config);
32929 if (err || !strlen(config) || isspace(config[0]))
32930 goto noconfig;
32931
32932 err = -ENODEV;
32933 - kgdboc_io_ops.is_console = 0;
32934 kgdb_tty_driver = NULL;
32935
32936 kgdboc_use_kms = 0;
32937 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
32938 int idx;
32939 if (cons->device && cons->device(cons, &idx) == p &&
32940 idx == tty_line) {
32941 - kgdboc_io_ops.is_console = 1;
32942 + is_console = 1;
32943 break;
32944 }
32945 cons = cons->next;
32946 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
32947 kgdb_tty_line = tty_line;
32948
32949 do_register:
32950 - err = kgdb_register_io_module(&kgdboc_io_ops);
32951 + if (is_console) {
32952 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
32953 + configured = 2;
32954 + } else {
32955 + err = kgdb_register_io_module(&kgdboc_io_ops);
32956 + configured = 1;
32957 + }
32958 if (err)
32959 goto noconfig;
32960
32961 - configured = 1;
32962 -
32963 return 0;
32964
32965 noconfig:
32966 @@ -212,7 +219,7 @@ noconfig:
32967 static int __init init_kgdboc(void)
32968 {
32969 /* Already configured? */
32970 - if (configured == 1)
32971 + if (configured >= 1)
32972 return 0;
32973
32974 return configure_kgdboc();
32975 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
32976 if (config[len - 1] == '\n')
32977 config[len - 1] = '\0';
32978
32979 - if (configured == 1)
32980 + if (configured >= 1)
32981 cleanup_kgdboc();
32982
32983 /* Go and configure with the new params. */
32984 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
32985 .post_exception = kgdboc_post_exp_handler,
32986 };
32987
32988 +static struct kgdb_io kgdboc_io_ops_console = {
32989 + .name = "kgdboc",
32990 + .read_char = kgdboc_get_char,
32991 + .write_char = kgdboc_put_char,
32992 + .pre_exception = kgdboc_pre_exp_handler,
32993 + .post_exception = kgdboc_post_exp_handler,
32994 + .is_console = 1
32995 +};
32996 +
32997 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
32998 /* This is only available if kgdboc is a built in for early debugging */
32999 static int __init kgdboc_early_init(char *opt)
33000 diff -urNp linux-3.0.4/drivers/tty/serial/mrst_max3110.c linux-3.0.4/drivers/tty/serial/mrst_max3110.c
33001 --- linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-07-21 22:17:23.000000000 -0400
33002 +++ linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-08-23 21:48:14.000000000 -0400
33003 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33004 int loop = 1, num, total = 0;
33005 u8 recv_buf[512], *pbuf;
33006
33007 + pax_track_stack();
33008 +
33009 pbuf = recv_buf;
33010 do {
33011 num = max3110_read_multi(max, pbuf);
33012 diff -urNp linux-3.0.4/drivers/tty/tty_io.c linux-3.0.4/drivers/tty/tty_io.c
33013 --- linux-3.0.4/drivers/tty/tty_io.c 2011-07-21 22:17:23.000000000 -0400
33014 +++ linux-3.0.4/drivers/tty/tty_io.c 2011-08-23 21:47:56.000000000 -0400
33015 @@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33016
33017 void tty_default_fops(struct file_operations *fops)
33018 {
33019 - *fops = tty_fops;
33020 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33021 }
33022
33023 /*
33024 diff -urNp linux-3.0.4/drivers/tty/tty_ldisc.c linux-3.0.4/drivers/tty/tty_ldisc.c
33025 --- linux-3.0.4/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
33026 +++ linux-3.0.4/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
33027 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33028 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33029 struct tty_ldisc_ops *ldo = ld->ops;
33030
33031 - ldo->refcount--;
33032 + atomic_dec(&ldo->refcount);
33033 module_put(ldo->owner);
33034 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33035
33036 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33037 spin_lock_irqsave(&tty_ldisc_lock, flags);
33038 tty_ldiscs[disc] = new_ldisc;
33039 new_ldisc->num = disc;
33040 - new_ldisc->refcount = 0;
33041 + atomic_set(&new_ldisc->refcount, 0);
33042 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33043
33044 return ret;
33045 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33046 return -EINVAL;
33047
33048 spin_lock_irqsave(&tty_ldisc_lock, flags);
33049 - if (tty_ldiscs[disc]->refcount)
33050 + if (atomic_read(&tty_ldiscs[disc]->refcount))
33051 ret = -EBUSY;
33052 else
33053 tty_ldiscs[disc] = NULL;
33054 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33055 if (ldops) {
33056 ret = ERR_PTR(-EAGAIN);
33057 if (try_module_get(ldops->owner)) {
33058 - ldops->refcount++;
33059 + atomic_inc(&ldops->refcount);
33060 ret = ldops;
33061 }
33062 }
33063 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33064 unsigned long flags;
33065
33066 spin_lock_irqsave(&tty_ldisc_lock, flags);
33067 - ldops->refcount--;
33068 + atomic_dec(&ldops->refcount);
33069 module_put(ldops->owner);
33070 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33071 }
33072 diff -urNp linux-3.0.4/drivers/tty/vt/keyboard.c linux-3.0.4/drivers/tty/vt/keyboard.c
33073 --- linux-3.0.4/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
33074 +++ linux-3.0.4/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
33075 @@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
33076 kbd->kbdmode == VC_OFF) &&
33077 value != KVAL(K_SAK))
33078 return; /* SAK is allowed even in raw mode */
33079 +
33080 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33081 + {
33082 + void *func = fn_handler[value];
33083 + if (func == fn_show_state || func == fn_show_ptregs ||
33084 + func == fn_show_mem)
33085 + return;
33086 + }
33087 +#endif
33088 +
33089 fn_handler[value](vc);
33090 }
33091
33092 diff -urNp linux-3.0.4/drivers/tty/vt/vt.c linux-3.0.4/drivers/tty/vt/vt.c
33093 --- linux-3.0.4/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
33094 +++ linux-3.0.4/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
33095 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33096
33097 static void notify_write(struct vc_data *vc, unsigned int unicode)
33098 {
33099 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33100 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
33101 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33102 }
33103
33104 diff -urNp linux-3.0.4/drivers/tty/vt/vt_ioctl.c linux-3.0.4/drivers/tty/vt/vt_ioctl.c
33105 --- linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
33106 +++ linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
33107 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33108 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33109 return -EFAULT;
33110
33111 - if (!capable(CAP_SYS_TTY_CONFIG))
33112 - perm = 0;
33113 -
33114 switch (cmd) {
33115 case KDGKBENT:
33116 key_map = key_maps[s];
33117 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33118 val = (i ? K_HOLE : K_NOSUCHMAP);
33119 return put_user(val, &user_kbe->kb_value);
33120 case KDSKBENT:
33121 + if (!capable(CAP_SYS_TTY_CONFIG))
33122 + perm = 0;
33123 +
33124 if (!perm)
33125 return -EPERM;
33126 if (!i && v == K_NOSUCHMAP) {
33127 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33128 int i, j, k;
33129 int ret;
33130
33131 - if (!capable(CAP_SYS_TTY_CONFIG))
33132 - perm = 0;
33133 -
33134 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33135 if (!kbs) {
33136 ret = -ENOMEM;
33137 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33138 kfree(kbs);
33139 return ((p && *p) ? -EOVERFLOW : 0);
33140 case KDSKBSENT:
33141 + if (!capable(CAP_SYS_TTY_CONFIG))
33142 + perm = 0;
33143 +
33144 if (!perm) {
33145 ret = -EPERM;
33146 goto reterr;
33147 diff -urNp linux-3.0.4/drivers/uio/uio.c linux-3.0.4/drivers/uio/uio.c
33148 --- linux-3.0.4/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
33149 +++ linux-3.0.4/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
33150 @@ -25,6 +25,7 @@
33151 #include <linux/kobject.h>
33152 #include <linux/cdev.h>
33153 #include <linux/uio_driver.h>
33154 +#include <asm/local.h>
33155
33156 #define UIO_MAX_DEVICES (1U << MINORBITS)
33157
33158 @@ -32,10 +33,10 @@ struct uio_device {
33159 struct module *owner;
33160 struct device *dev;
33161 int minor;
33162 - atomic_t event;
33163 + atomic_unchecked_t event;
33164 struct fasync_struct *async_queue;
33165 wait_queue_head_t wait;
33166 - int vma_count;
33167 + local_t vma_count;
33168 struct uio_info *info;
33169 struct kobject *map_dir;
33170 struct kobject *portio_dir;
33171 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33172 struct device_attribute *attr, char *buf)
33173 {
33174 struct uio_device *idev = dev_get_drvdata(dev);
33175 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33176 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33177 }
33178
33179 static struct device_attribute uio_class_attributes[] = {
33180 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
33181 {
33182 struct uio_device *idev = info->uio_dev;
33183
33184 - atomic_inc(&idev->event);
33185 + atomic_inc_unchecked(&idev->event);
33186 wake_up_interruptible(&idev->wait);
33187 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33188 }
33189 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
33190 }
33191
33192 listener->dev = idev;
33193 - listener->event_count = atomic_read(&idev->event);
33194 + listener->event_count = atomic_read_unchecked(&idev->event);
33195 filep->private_data = listener;
33196
33197 if (idev->info->open) {
33198 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
33199 return -EIO;
33200
33201 poll_wait(filep, &idev->wait, wait);
33202 - if (listener->event_count != atomic_read(&idev->event))
33203 + if (listener->event_count != atomic_read_unchecked(&idev->event))
33204 return POLLIN | POLLRDNORM;
33205 return 0;
33206 }
33207 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
33208 do {
33209 set_current_state(TASK_INTERRUPTIBLE);
33210
33211 - event_count = atomic_read(&idev->event);
33212 + event_count = atomic_read_unchecked(&idev->event);
33213 if (event_count != listener->event_count) {
33214 if (copy_to_user(buf, &event_count, count))
33215 retval = -EFAULT;
33216 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
33217 static void uio_vma_open(struct vm_area_struct *vma)
33218 {
33219 struct uio_device *idev = vma->vm_private_data;
33220 - idev->vma_count++;
33221 + local_inc(&idev->vma_count);
33222 }
33223
33224 static void uio_vma_close(struct vm_area_struct *vma)
33225 {
33226 struct uio_device *idev = vma->vm_private_data;
33227 - idev->vma_count--;
33228 + local_dec(&idev->vma_count);
33229 }
33230
33231 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33232 @@ -823,7 +824,7 @@ int __uio_register_device(struct module
33233 idev->owner = owner;
33234 idev->info = info;
33235 init_waitqueue_head(&idev->wait);
33236 - atomic_set(&idev->event, 0);
33237 + atomic_set_unchecked(&idev->event, 0);
33238
33239 ret = uio_get_minor(idev);
33240 if (ret)
33241 diff -urNp linux-3.0.4/drivers/usb/atm/cxacru.c linux-3.0.4/drivers/usb/atm/cxacru.c
33242 --- linux-3.0.4/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
33243 +++ linux-3.0.4/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
33244 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33245 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33246 if (ret < 2)
33247 return -EINVAL;
33248 - if (index < 0 || index > 0x7f)
33249 + if (index > 0x7f)
33250 return -EINVAL;
33251 pos += tmp;
33252
33253 diff -urNp linux-3.0.4/drivers/usb/atm/usbatm.c linux-3.0.4/drivers/usb/atm/usbatm.c
33254 --- linux-3.0.4/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
33255 +++ linux-3.0.4/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
33256 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33257 if (printk_ratelimit())
33258 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33259 __func__, vpi, vci);
33260 - atomic_inc(&vcc->stats->rx_err);
33261 + atomic_inc_unchecked(&vcc->stats->rx_err);
33262 return;
33263 }
33264
33265 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33266 if (length > ATM_MAX_AAL5_PDU) {
33267 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33268 __func__, length, vcc);
33269 - atomic_inc(&vcc->stats->rx_err);
33270 + atomic_inc_unchecked(&vcc->stats->rx_err);
33271 goto out;
33272 }
33273
33274 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33275 if (sarb->len < pdu_length) {
33276 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33277 __func__, pdu_length, sarb->len, vcc);
33278 - atomic_inc(&vcc->stats->rx_err);
33279 + atomic_inc_unchecked(&vcc->stats->rx_err);
33280 goto out;
33281 }
33282
33283 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33284 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33285 __func__, vcc);
33286 - atomic_inc(&vcc->stats->rx_err);
33287 + atomic_inc_unchecked(&vcc->stats->rx_err);
33288 goto out;
33289 }
33290
33291 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33292 if (printk_ratelimit())
33293 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33294 __func__, length);
33295 - atomic_inc(&vcc->stats->rx_drop);
33296 + atomic_inc_unchecked(&vcc->stats->rx_drop);
33297 goto out;
33298 }
33299
33300 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33301
33302 vcc->push(vcc, skb);
33303
33304 - atomic_inc(&vcc->stats->rx);
33305 + atomic_inc_unchecked(&vcc->stats->rx);
33306 out:
33307 skb_trim(sarb, 0);
33308 }
33309 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33310 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33311
33312 usbatm_pop(vcc, skb);
33313 - atomic_inc(&vcc->stats->tx);
33314 + atomic_inc_unchecked(&vcc->stats->tx);
33315
33316 skb = skb_dequeue(&instance->sndqueue);
33317 }
33318 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33319 if (!left--)
33320 return sprintf(page,
33321 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33322 - atomic_read(&atm_dev->stats.aal5.tx),
33323 - atomic_read(&atm_dev->stats.aal5.tx_err),
33324 - atomic_read(&atm_dev->stats.aal5.rx),
33325 - atomic_read(&atm_dev->stats.aal5.rx_err),
33326 - atomic_read(&atm_dev->stats.aal5.rx_drop));
33327 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33328 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33329 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33330 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33331 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33332
33333 if (!left--) {
33334 if (instance->disconnected)
33335 diff -urNp linux-3.0.4/drivers/usb/core/devices.c linux-3.0.4/drivers/usb/core/devices.c
33336 --- linux-3.0.4/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
33337 +++ linux-3.0.4/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
33338 @@ -126,7 +126,7 @@ static const char format_endpt[] =
33339 * time it gets called.
33340 */
33341 static struct device_connect_event {
33342 - atomic_t count;
33343 + atomic_unchecked_t count;
33344 wait_queue_head_t wait;
33345 } device_event = {
33346 .count = ATOMIC_INIT(1),
33347 @@ -164,7 +164,7 @@ static const struct class_info clas_info
33348
33349 void usbfs_conn_disc_event(void)
33350 {
33351 - atomic_add(2, &device_event.count);
33352 + atomic_add_unchecked(2, &device_event.count);
33353 wake_up(&device_event.wait);
33354 }
33355
33356 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33357
33358 poll_wait(file, &device_event.wait, wait);
33359
33360 - event_count = atomic_read(&device_event.count);
33361 + event_count = atomic_read_unchecked(&device_event.count);
33362 if (file->f_version != event_count) {
33363 file->f_version = event_count;
33364 return POLLIN | POLLRDNORM;
33365 diff -urNp linux-3.0.4/drivers/usb/core/message.c linux-3.0.4/drivers/usb/core/message.c
33366 --- linux-3.0.4/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
33367 +++ linux-3.0.4/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
33368 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33369 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33370 if (buf) {
33371 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33372 - if (len > 0) {
33373 - smallbuf = kmalloc(++len, GFP_NOIO);
33374 + if (len++ > 0) {
33375 + smallbuf = kmalloc(len, GFP_NOIO);
33376 if (!smallbuf)
33377 return buf;
33378 memcpy(smallbuf, buf, len);
33379 diff -urNp linux-3.0.4/drivers/usb/early/ehci-dbgp.c linux-3.0.4/drivers/usb/early/ehci-dbgp.c
33380 --- linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
33381 +++ linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
33382 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33383
33384 #ifdef CONFIG_KGDB
33385 static struct kgdb_io kgdbdbgp_io_ops;
33386 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33387 +static struct kgdb_io kgdbdbgp_io_ops_console;
33388 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33389 #else
33390 #define dbgp_kgdb_mode (0)
33391 #endif
33392 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33393 .write_char = kgdbdbgp_write_char,
33394 };
33395
33396 +static struct kgdb_io kgdbdbgp_io_ops_console = {
33397 + .name = "kgdbdbgp",
33398 + .read_char = kgdbdbgp_read_char,
33399 + .write_char = kgdbdbgp_write_char,
33400 + .is_console = 1
33401 +};
33402 +
33403 static int kgdbdbgp_wait_time;
33404
33405 static int __init kgdbdbgp_parse_config(char *str)
33406 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
33407 ptr++;
33408 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33409 }
33410 - kgdb_register_io_module(&kgdbdbgp_io_ops);
33411 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33412 + if (early_dbgp_console.index != -1)
33413 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33414 + else
33415 + kgdb_register_io_module(&kgdbdbgp_io_ops);
33416
33417 return 0;
33418 }
33419 diff -urNp linux-3.0.4/drivers/usb/host/xhci-mem.c linux-3.0.4/drivers/usb/host/xhci-mem.c
33420 --- linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
33421 +++ linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
33422 @@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
33423 unsigned int num_tests;
33424 int i, ret;
33425
33426 + pax_track_stack();
33427 +
33428 num_tests = ARRAY_SIZE(simple_test_vector);
33429 for (i = 0; i < num_tests; i++) {
33430 ret = xhci_test_trb_in_td(xhci,
33431 diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-hc.h linux-3.0.4/drivers/usb/wusbcore/wa-hc.h
33432 --- linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
33433 +++ linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
33434 @@ -192,7 +192,7 @@ struct wahc {
33435 struct list_head xfer_delayed_list;
33436 spinlock_t xfer_list_lock;
33437 struct work_struct xfer_work;
33438 - atomic_t xfer_id_count;
33439 + atomic_unchecked_t xfer_id_count;
33440 };
33441
33442
33443 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33444 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33445 spin_lock_init(&wa->xfer_list_lock);
33446 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33447 - atomic_set(&wa->xfer_id_count, 1);
33448 + atomic_set_unchecked(&wa->xfer_id_count, 1);
33449 }
33450
33451 /**
33452 diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c
33453 --- linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
33454 +++ linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
33455 @@ -294,7 +294,7 @@ out:
33456 */
33457 static void wa_xfer_id_init(struct wa_xfer *xfer)
33458 {
33459 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33460 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33461 }
33462
33463 /*
33464 diff -urNp linux-3.0.4/drivers/vhost/vhost.c linux-3.0.4/drivers/vhost/vhost.c
33465 --- linux-3.0.4/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
33466 +++ linux-3.0.4/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
33467 @@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
33468 return get_user(vq->last_used_idx, &used->idx);
33469 }
33470
33471 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33472 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33473 {
33474 struct file *eventfp, *filep = NULL,
33475 *pollstart = NULL, *pollstop = NULL;
33476 diff -urNp linux-3.0.4/drivers/video/fbcmap.c linux-3.0.4/drivers/video/fbcmap.c
33477 --- linux-3.0.4/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
33478 +++ linux-3.0.4/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
33479 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33480 rc = -ENODEV;
33481 goto out;
33482 }
33483 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33484 - !info->fbops->fb_setcmap)) {
33485 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33486 rc = -EINVAL;
33487 goto out1;
33488 }
33489 diff -urNp linux-3.0.4/drivers/video/fbmem.c linux-3.0.4/drivers/video/fbmem.c
33490 --- linux-3.0.4/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
33491 +++ linux-3.0.4/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
33492 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33493 image->dx += image->width + 8;
33494 }
33495 } else if (rotate == FB_ROTATE_UD) {
33496 - for (x = 0; x < num && image->dx >= 0; x++) {
33497 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33498 info->fbops->fb_imageblit(info, image);
33499 image->dx -= image->width + 8;
33500 }
33501 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33502 image->dy += image->height + 8;
33503 }
33504 } else if (rotate == FB_ROTATE_CCW) {
33505 - for (x = 0; x < num && image->dy >= 0; x++) {
33506 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33507 info->fbops->fb_imageblit(info, image);
33508 image->dy -= image->height + 8;
33509 }
33510 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33511 int flags = info->flags;
33512 int ret = 0;
33513
33514 + pax_track_stack();
33515 +
33516 if (var->activate & FB_ACTIVATE_INV_MODE) {
33517 struct fb_videomode mode1, mode2;
33518
33519 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33520 void __user *argp = (void __user *)arg;
33521 long ret = 0;
33522
33523 + pax_track_stack();
33524 +
33525 switch (cmd) {
33526 case FBIOGET_VSCREENINFO:
33527 if (!lock_fb_info(info))
33528 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33529 return -EFAULT;
33530 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33531 return -EINVAL;
33532 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33533 + if (con2fb.framebuffer >= FB_MAX)
33534 return -EINVAL;
33535 if (!registered_fb[con2fb.framebuffer])
33536 request_module("fb%d", con2fb.framebuffer);
33537 diff -urNp linux-3.0.4/drivers/video/i810/i810_accel.c linux-3.0.4/drivers/video/i810/i810_accel.c
33538 --- linux-3.0.4/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
33539 +++ linux-3.0.4/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
33540 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33541 }
33542 }
33543 printk("ringbuffer lockup!!!\n");
33544 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33545 i810_report_error(mmio);
33546 par->dev_flags |= LOCKUP;
33547 info->pixmap.scan_align = 1;
33548 diff -urNp linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm
33549 --- linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-07-21 22:17:23.000000000 -0400
33550 +++ linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-08-29 23:49:40.000000000 -0400
33551 @@ -1,1604 +1,1123 @@
33552 P3
33553 -# Standard 224-color Linux logo
33554 80 80
33555 255
33556 - 0 0 0 0 0 0 0 0 0 0 0 0
33557 - 0 0 0 0 0 0 0 0 0 0 0 0
33558 - 0 0 0 0 0 0 0 0 0 0 0 0
33559 - 0 0 0 0 0 0 0 0 0 0 0 0
33560 - 0 0 0 0 0 0 0 0 0 0 0 0
33561 - 0 0 0 0 0 0 0 0 0 0 0 0
33562 - 0 0 0 0 0 0 0 0 0 0 0 0
33563 - 0 0 0 0 0 0 0 0 0 0 0 0
33564 - 0 0 0 0 0 0 0 0 0 0 0 0
33565 - 6 6 6 6 6 6 10 10 10 10 10 10
33566 - 10 10 10 6 6 6 6 6 6 6 6 6
33567 - 0 0 0 0 0 0 0 0 0 0 0 0
33568 - 0 0 0 0 0 0 0 0 0 0 0 0
33569 - 0 0 0 0 0 0 0 0 0 0 0 0
33570 - 0 0 0 0 0 0 0 0 0 0 0 0
33571 - 0 0 0 0 0 0 0 0 0 0 0 0
33572 - 0 0 0 0 0 0 0 0 0 0 0 0
33573 - 0 0 0 0 0 0 0 0 0 0 0 0
33574 - 0 0 0 0 0 0 0 0 0 0 0 0
33575 - 0 0 0 0 0 0 0 0 0 0 0 0
33576 - 0 0 0 0 0 0 0 0 0 0 0 0
33577 - 0 0 0 0 0 0 0 0 0 0 0 0
33578 - 0 0 0 0 0 0 0 0 0 0 0 0
33579 - 0 0 0 0 0 0 0 0 0 0 0 0
33580 - 0 0 0 0 0 0 0 0 0 0 0 0
33581 - 0 0 0 0 0 0 0 0 0 0 0 0
33582 - 0 0 0 0 0 0 0 0 0 0 0 0
33583 - 0 0 0 0 0 0 0 0 0 0 0 0
33584 - 0 0 0 6 6 6 10 10 10 14 14 14
33585 - 22 22 22 26 26 26 30 30 30 34 34 34
33586 - 30 30 30 30 30 30 26 26 26 18 18 18
33587 - 14 14 14 10 10 10 6 6 6 0 0 0
33588 - 0 0 0 0 0 0 0 0 0 0 0 0
33589 - 0 0 0 0 0 0 0 0 0 0 0 0
33590 - 0 0 0 0 0 0 0 0 0 0 0 0
33591 - 0 0 0 0 0 0 0 0 0 0 0 0
33592 - 0 0 0 0 0 0 0 0 0 0 0 0
33593 - 0 0 0 0 0 0 0 0 0 0 0 0
33594 - 0 0 0 0 0 0 0 0 0 0 0 0
33595 - 0 0 0 0 0 0 0 0 0 0 0 0
33596 - 0 0 0 0 0 0 0 0 0 0 0 0
33597 - 0 0 0 0 0 1 0 0 1 0 0 0
33598 - 0 0 0 0 0 0 0 0 0 0 0 0
33599 - 0 0 0 0 0 0 0 0 0 0 0 0
33600 - 0 0 0 0 0 0 0 0 0 0 0 0
33601 - 0 0 0 0 0 0 0 0 0 0 0 0
33602 - 0 0 0 0 0 0 0 0 0 0 0 0
33603 - 0 0 0 0 0 0 0 0 0 0 0 0
33604 - 6 6 6 14 14 14 26 26 26 42 42 42
33605 - 54 54 54 66 66 66 78 78 78 78 78 78
33606 - 78 78 78 74 74 74 66 66 66 54 54 54
33607 - 42 42 42 26 26 26 18 18 18 10 10 10
33608 - 6 6 6 0 0 0 0 0 0 0 0 0
33609 - 0 0 0 0 0 0 0 0 0 0 0 0
33610 - 0 0 0 0 0 0 0 0 0 0 0 0
33611 - 0 0 0 0 0 0 0 0 0 0 0 0
33612 - 0 0 0 0 0 0 0 0 0 0 0 0
33613 - 0 0 0 0 0 0 0 0 0 0 0 0
33614 - 0 0 0 0 0 0 0 0 0 0 0 0
33615 - 0 0 0 0 0 0 0 0 0 0 0 0
33616 - 0 0 0 0 0 0 0 0 0 0 0 0
33617 - 0 0 1 0 0 0 0 0 0 0 0 0
33618 - 0 0 0 0 0 0 0 0 0 0 0 0
33619 - 0 0 0 0 0 0 0 0 0 0 0 0
33620 - 0 0 0 0 0 0 0 0 0 0 0 0
33621 - 0 0 0 0 0 0 0 0 0 0 0 0
33622 - 0 0 0 0 0 0 0 0 0 0 0 0
33623 - 0 0 0 0 0 0 0 0 0 10 10 10
33624 - 22 22 22 42 42 42 66 66 66 86 86 86
33625 - 66 66 66 38 38 38 38 38 38 22 22 22
33626 - 26 26 26 34 34 34 54 54 54 66 66 66
33627 - 86 86 86 70 70 70 46 46 46 26 26 26
33628 - 14 14 14 6 6 6 0 0 0 0 0 0
33629 - 0 0 0 0 0 0 0 0 0 0 0 0
33630 - 0 0 0 0 0 0 0 0 0 0 0 0
33631 - 0 0 0 0 0 0 0 0 0 0 0 0
33632 - 0 0 0 0 0 0 0 0 0 0 0 0
33633 - 0 0 0 0 0 0 0 0 0 0 0 0
33634 - 0 0 0 0 0 0 0 0 0 0 0 0
33635 - 0 0 0 0 0 0 0 0 0 0 0 0
33636 - 0 0 0 0 0 0 0 0 0 0 0 0
33637 - 0 0 1 0 0 1 0 0 1 0 0 0
33638 - 0 0 0 0 0 0 0 0 0 0 0 0
33639 - 0 0 0 0 0 0 0 0 0 0 0 0
33640 - 0 0 0 0 0 0 0 0 0 0 0 0
33641 - 0 0 0 0 0 0 0 0 0 0 0 0
33642 - 0 0 0 0 0 0 0 0 0 0 0 0
33643 - 0 0 0 0 0 0 10 10 10 26 26 26
33644 - 50 50 50 82 82 82 58 58 58 6 6 6
33645 - 2 2 6 2 2 6 2 2 6 2 2 6
33646 - 2 2 6 2 2 6 2 2 6 2 2 6
33647 - 6 6 6 54 54 54 86 86 86 66 66 66
33648 - 38 38 38 18 18 18 6 6 6 0 0 0
33649 - 0 0 0 0 0 0 0 0 0 0 0 0
33650 - 0 0 0 0 0 0 0 0 0 0 0 0
33651 - 0 0 0 0 0 0 0 0 0 0 0 0
33652 - 0 0 0 0 0 0 0 0 0 0 0 0
33653 - 0 0 0 0 0 0 0 0 0 0 0 0
33654 - 0 0 0 0 0 0 0 0 0 0 0 0
33655 - 0 0 0 0 0 0 0 0 0 0 0 0
33656 - 0 0 0 0 0 0 0 0 0 0 0 0
33657 - 0 0 0 0 0 0 0 0 0 0 0 0
33658 - 0 0 0 0 0 0 0 0 0 0 0 0
33659 - 0 0 0 0 0 0 0 0 0 0 0 0
33660 - 0 0 0 0 0 0 0 0 0 0 0 0
33661 - 0 0 0 0 0 0 0 0 0 0 0 0
33662 - 0 0 0 0 0 0 0 0 0 0 0 0
33663 - 0 0 0 6 6 6 22 22 22 50 50 50
33664 - 78 78 78 34 34 34 2 2 6 2 2 6
33665 - 2 2 6 2 2 6 2 2 6 2 2 6
33666 - 2 2 6 2 2 6 2 2 6 2 2 6
33667 - 2 2 6 2 2 6 6 6 6 70 70 70
33668 - 78 78 78 46 46 46 22 22 22 6 6 6
33669 - 0 0 0 0 0 0 0 0 0 0 0 0
33670 - 0 0 0 0 0 0 0 0 0 0 0 0
33671 - 0 0 0 0 0 0 0 0 0 0 0 0
33672 - 0 0 0 0 0 0 0 0 0 0 0 0
33673 - 0 0 0 0 0 0 0 0 0 0 0 0
33674 - 0 0 0 0 0 0 0 0 0 0 0 0
33675 - 0 0 0 0 0 0 0 0 0 0 0 0
33676 - 0 0 0 0 0 0 0 0 0 0 0 0
33677 - 0 0 1 0 0 1 0 0 1 0 0 0
33678 - 0 0 0 0 0 0 0 0 0 0 0 0
33679 - 0 0 0 0 0 0 0 0 0 0 0 0
33680 - 0 0 0 0 0 0 0 0 0 0 0 0
33681 - 0 0 0 0 0 0 0 0 0 0 0 0
33682 - 0 0 0 0 0 0 0 0 0 0 0 0
33683 - 6 6 6 18 18 18 42 42 42 82 82 82
33684 - 26 26 26 2 2 6 2 2 6 2 2 6
33685 - 2 2 6 2 2 6 2 2 6 2 2 6
33686 - 2 2 6 2 2 6 2 2 6 14 14 14
33687 - 46 46 46 34 34 34 6 6 6 2 2 6
33688 - 42 42 42 78 78 78 42 42 42 18 18 18
33689 - 6 6 6 0 0 0 0 0 0 0 0 0
33690 - 0 0 0 0 0 0 0 0 0 0 0 0
33691 - 0 0 0 0 0 0 0 0 0 0 0 0
33692 - 0 0 0 0 0 0 0 0 0 0 0 0
33693 - 0 0 0 0 0 0 0 0 0 0 0 0
33694 - 0 0 0 0 0 0 0 0 0 0 0 0
33695 - 0 0 0 0 0 0 0 0 0 0 0 0
33696 - 0 0 0 0 0 0 0 0 0 0 0 0
33697 - 0 0 1 0 0 0 0 0 1 0 0 0
33698 - 0 0 0 0 0 0 0 0 0 0 0 0
33699 - 0 0 0 0 0 0 0 0 0 0 0 0
33700 - 0 0 0 0 0 0 0 0 0 0 0 0
33701 - 0 0 0 0 0 0 0 0 0 0 0 0
33702 - 0 0 0 0 0 0 0 0 0 0 0 0
33703 - 10 10 10 30 30 30 66 66 66 58 58 58
33704 - 2 2 6 2 2 6 2 2 6 2 2 6
33705 - 2 2 6 2 2 6 2 2 6 2 2 6
33706 - 2 2 6 2 2 6 2 2 6 26 26 26
33707 - 86 86 86 101 101 101 46 46 46 10 10 10
33708 - 2 2 6 58 58 58 70 70 70 34 34 34
33709 - 10 10 10 0 0 0 0 0 0 0 0 0
33710 - 0 0 0 0 0 0 0 0 0 0 0 0
33711 - 0 0 0 0 0 0 0 0 0 0 0 0
33712 - 0 0 0 0 0 0 0 0 0 0 0 0
33713 - 0 0 0 0 0 0 0 0 0 0 0 0
33714 - 0 0 0 0 0 0 0 0 0 0 0 0
33715 - 0 0 0 0 0 0 0 0 0 0 0 0
33716 - 0 0 0 0 0 0 0 0 0 0 0 0
33717 - 0 0 1 0 0 1 0 0 1 0 0 0
33718 - 0 0 0 0 0 0 0 0 0 0 0 0
33719 - 0 0 0 0 0 0 0 0 0 0 0 0
33720 - 0 0 0 0 0 0 0 0 0 0 0 0
33721 - 0 0 0 0 0 0 0 0 0 0 0 0
33722 - 0 0 0 0 0 0 0 0 0 0 0 0
33723 - 14 14 14 42 42 42 86 86 86 10 10 10
33724 - 2 2 6 2 2 6 2 2 6 2 2 6
33725 - 2 2 6 2 2 6 2 2 6 2 2 6
33726 - 2 2 6 2 2 6 2 2 6 30 30 30
33727 - 94 94 94 94 94 94 58 58 58 26 26 26
33728 - 2 2 6 6 6 6 78 78 78 54 54 54
33729 - 22 22 22 6 6 6 0 0 0 0 0 0
33730 - 0 0 0 0 0 0 0 0 0 0 0 0
33731 - 0 0 0 0 0 0 0 0 0 0 0 0
33732 - 0 0 0 0 0 0 0 0 0 0 0 0
33733 - 0 0 0 0 0 0 0 0 0 0 0 0
33734 - 0 0 0 0 0 0 0 0 0 0 0 0
33735 - 0 0 0 0 0 0 0 0 0 0 0 0
33736 - 0 0 0 0 0 0 0 0 0 0 0 0
33737 - 0 0 0 0 0 0 0 0 0 0 0 0
33738 - 0 0 0 0 0 0 0 0 0 0 0 0
33739 - 0 0 0 0 0 0 0 0 0 0 0 0
33740 - 0 0 0 0 0 0 0 0 0 0 0 0
33741 - 0 0 0 0 0 0 0 0 0 0 0 0
33742 - 0 0 0 0 0 0 0 0 0 6 6 6
33743 - 22 22 22 62 62 62 62 62 62 2 2 6
33744 - 2 2 6 2 2 6 2 2 6 2 2 6
33745 - 2 2 6 2 2 6 2 2 6 2 2 6
33746 - 2 2 6 2 2 6 2 2 6 26 26 26
33747 - 54 54 54 38 38 38 18 18 18 10 10 10
33748 - 2 2 6 2 2 6 34 34 34 82 82 82
33749 - 38 38 38 14 14 14 0 0 0 0 0 0
33750 - 0 0 0 0 0 0 0 0 0 0 0 0
33751 - 0 0 0 0 0 0 0 0 0 0 0 0
33752 - 0 0 0 0 0 0 0 0 0 0 0 0
33753 - 0 0 0 0 0 0 0 0 0 0 0 0
33754 - 0 0 0 0 0 0 0 0 0 0 0 0
33755 - 0 0 0 0 0 0 0 0 0 0 0 0
33756 - 0 0 0 0 0 0 0 0 0 0 0 0
33757 - 0 0 0 0 0 1 0 0 1 0 0 0
33758 - 0 0 0 0 0 0 0 0 0 0 0 0
33759 - 0 0 0 0 0 0 0 0 0 0 0 0
33760 - 0 0 0 0 0 0 0 0 0 0 0 0
33761 - 0 0 0 0 0 0 0 0 0 0 0 0
33762 - 0 0 0 0 0 0 0 0 0 6 6 6
33763 - 30 30 30 78 78 78 30 30 30 2 2 6
33764 - 2 2 6 2 2 6 2 2 6 2 2 6
33765 - 2 2 6 2 2 6 2 2 6 2 2 6
33766 - 2 2 6 2 2 6 2 2 6 10 10 10
33767 - 10 10 10 2 2 6 2 2 6 2 2 6
33768 - 2 2 6 2 2 6 2 2 6 78 78 78
33769 - 50 50 50 18 18 18 6 6 6 0 0 0
33770 - 0 0 0 0 0 0 0 0 0 0 0 0
33771 - 0 0 0 0 0 0 0 0 0 0 0 0
33772 - 0 0 0 0 0 0 0 0 0 0 0 0
33773 - 0 0 0 0 0 0 0 0 0 0 0 0
33774 - 0 0 0 0 0 0 0 0 0 0 0 0
33775 - 0 0 0 0 0 0 0 0 0 0 0 0
33776 - 0 0 0 0 0 0 0 0 0 0 0 0
33777 - 0 0 1 0 0 0 0 0 0 0 0 0
33778 - 0 0 0 0 0 0 0 0 0 0 0 0
33779 - 0 0 0 0 0 0 0 0 0 0 0 0
33780 - 0 0 0 0 0 0 0 0 0 0 0 0
33781 - 0 0 0 0 0 0 0 0 0 0 0 0
33782 - 0 0 0 0 0 0 0 0 0 10 10 10
33783 - 38 38 38 86 86 86 14 14 14 2 2 6
33784 - 2 2 6 2 2 6 2 2 6 2 2 6
33785 - 2 2 6 2 2 6 2 2 6 2 2 6
33786 - 2 2 6 2 2 6 2 2 6 2 2 6
33787 - 2 2 6 2 2 6 2 2 6 2 2 6
33788 - 2 2 6 2 2 6 2 2 6 54 54 54
33789 - 66 66 66 26 26 26 6 6 6 0 0 0
33790 - 0 0 0 0 0 0 0 0 0 0 0 0
33791 - 0 0 0 0 0 0 0 0 0 0 0 0
33792 - 0 0 0 0 0 0 0 0 0 0 0 0
33793 - 0 0 0 0 0 0 0 0 0 0 0 0
33794 - 0 0 0 0 0 0 0 0 0 0 0 0
33795 - 0 0 0 0 0 0 0 0 0 0 0 0
33796 - 0 0 0 0 0 0 0 0 0 0 0 0
33797 - 0 0 0 0 0 1 0 0 1 0 0 0
33798 - 0 0 0 0 0 0 0 0 0 0 0 0
33799 - 0 0 0 0 0 0 0 0 0 0 0 0
33800 - 0 0 0 0 0 0 0 0 0 0 0 0
33801 - 0 0 0 0 0 0 0 0 0 0 0 0
33802 - 0 0 0 0 0 0 0 0 0 14 14 14
33803 - 42 42 42 82 82 82 2 2 6 2 2 6
33804 - 2 2 6 6 6 6 10 10 10 2 2 6
33805 - 2 2 6 2 2 6 2 2 6 2 2 6
33806 - 2 2 6 2 2 6 2 2 6 6 6 6
33807 - 14 14 14 10 10 10 2 2 6 2 2 6
33808 - 2 2 6 2 2 6 2 2 6 18 18 18
33809 - 82 82 82 34 34 34 10 10 10 0 0 0
33810 - 0 0 0 0 0 0 0 0 0 0 0 0
33811 - 0 0 0 0 0 0 0 0 0 0 0 0
33812 - 0 0 0 0 0 0 0 0 0 0 0 0
33813 - 0 0 0 0 0 0 0 0 0 0 0 0
33814 - 0 0 0 0 0 0 0 0 0 0 0 0
33815 - 0 0 0 0 0 0 0 0 0 0 0 0
33816 - 0 0 0 0 0 0 0 0 0 0 0 0
33817 - 0 0 1 0 0 0 0 0 0 0 0 0
33818 - 0 0 0 0 0 0 0 0 0 0 0 0
33819 - 0 0 0 0 0 0 0 0 0 0 0 0
33820 - 0 0 0 0 0 0 0 0 0 0 0 0
33821 - 0 0 0 0 0 0 0 0 0 0 0 0
33822 - 0 0 0 0 0 0 0 0 0 14 14 14
33823 - 46 46 46 86 86 86 2 2 6 2 2 6
33824 - 6 6 6 6 6 6 22 22 22 34 34 34
33825 - 6 6 6 2 2 6 2 2 6 2 2 6
33826 - 2 2 6 2 2 6 18 18 18 34 34 34
33827 - 10 10 10 50 50 50 22 22 22 2 2 6
33828 - 2 2 6 2 2 6 2 2 6 10 10 10
33829 - 86 86 86 42 42 42 14 14 14 0 0 0
33830 - 0 0 0 0 0 0 0 0 0 0 0 0
33831 - 0 0 0 0 0 0 0 0 0 0 0 0
33832 - 0 0 0 0 0 0 0 0 0 0 0 0
33833 - 0 0 0 0 0 0 0 0 0 0 0 0
33834 - 0 0 0 0 0 0 0 0 0 0 0 0
33835 - 0 0 0 0 0 0 0 0 0 0 0 0
33836 - 0 0 0 0 0 0 0 0 0 0 0 0
33837 - 0 0 1 0 0 1 0 0 1 0 0 0
33838 - 0 0 0 0 0 0 0 0 0 0 0 0
33839 - 0 0 0 0 0 0 0 0 0 0 0 0
33840 - 0 0 0 0 0 0 0 0 0 0 0 0
33841 - 0 0 0 0 0 0 0 0 0 0 0 0
33842 - 0 0 0 0 0 0 0 0 0 14 14 14
33843 - 46 46 46 86 86 86 2 2 6 2 2 6
33844 - 38 38 38 116 116 116 94 94 94 22 22 22
33845 - 22 22 22 2 2 6 2 2 6 2 2 6
33846 - 14 14 14 86 86 86 138 138 138 162 162 162
33847 -154 154 154 38 38 38 26 26 26 6 6 6
33848 - 2 2 6 2 2 6 2 2 6 2 2 6
33849 - 86 86 86 46 46 46 14 14 14 0 0 0
33850 - 0 0 0 0 0 0 0 0 0 0 0 0
33851 - 0 0 0 0 0 0 0 0 0 0 0 0
33852 - 0 0 0 0 0 0 0 0 0 0 0 0
33853 - 0 0 0 0 0 0 0 0 0 0 0 0
33854 - 0 0 0 0 0 0 0 0 0 0 0 0
33855 - 0 0 0 0 0 0 0 0 0 0 0 0
33856 - 0 0 0 0 0 0 0 0 0 0 0 0
33857 - 0 0 0 0 0 0 0 0 0 0 0 0
33858 - 0 0 0 0 0 0 0 0 0 0 0 0
33859 - 0 0 0 0 0 0 0 0 0 0 0 0
33860 - 0 0 0 0 0 0 0 0 0 0 0 0
33861 - 0 0 0 0 0 0 0 0 0 0 0 0
33862 - 0 0 0 0 0 0 0 0 0 14 14 14
33863 - 46 46 46 86 86 86 2 2 6 14 14 14
33864 -134 134 134 198 198 198 195 195 195 116 116 116
33865 - 10 10 10 2 2 6 2 2 6 6 6 6
33866 -101 98 89 187 187 187 210 210 210 218 218 218
33867 -214 214 214 134 134 134 14 14 14 6 6 6
33868 - 2 2 6 2 2 6 2 2 6 2 2 6
33869 - 86 86 86 50 50 50 18 18 18 6 6 6
33870 - 0 0 0 0 0 0 0 0 0 0 0 0
33871 - 0 0 0 0 0 0 0 0 0 0 0 0
33872 - 0 0 0 0 0 0 0 0 0 0 0 0
33873 - 0 0 0 0 0 0 0 0 0 0 0 0
33874 - 0 0 0 0 0 0 0 0 0 0 0 0
33875 - 0 0 0 0 0 0 0 0 0 0 0 0
33876 - 0 0 0 0 0 0 0 0 1 0 0 0
33877 - 0 0 1 0 0 1 0 0 1 0 0 0
33878 - 0 0 0 0 0 0 0 0 0 0 0 0
33879 - 0 0 0 0 0 0 0 0 0 0 0 0
33880 - 0 0 0 0 0 0 0 0 0 0 0 0
33881 - 0 0 0 0 0 0 0 0 0 0 0 0
33882 - 0 0 0 0 0 0 0 0 0 14 14 14
33883 - 46 46 46 86 86 86 2 2 6 54 54 54
33884 -218 218 218 195 195 195 226 226 226 246 246 246
33885 - 58 58 58 2 2 6 2 2 6 30 30 30
33886 -210 210 210 253 253 253 174 174 174 123 123 123
33887 -221 221 221 234 234 234 74 74 74 2 2 6
33888 - 2 2 6 2 2 6 2 2 6 2 2 6
33889 - 70 70 70 58 58 58 22 22 22 6 6 6
33890 - 0 0 0 0 0 0 0 0 0 0 0 0
33891 - 0 0 0 0 0 0 0 0 0 0 0 0
33892 - 0 0 0 0 0 0 0 0 0 0 0 0
33893 - 0 0 0 0 0 0 0 0 0 0 0 0
33894 - 0 0 0 0 0 0 0 0 0 0 0 0
33895 - 0 0 0 0 0 0 0 0 0 0 0 0
33896 - 0 0 0 0 0 0 0 0 0 0 0 0
33897 - 0 0 0 0 0 0 0 0 0 0 0 0
33898 - 0 0 0 0 0 0 0 0 0 0 0 0
33899 - 0 0 0 0 0 0 0 0 0 0 0 0
33900 - 0 0 0 0 0 0 0 0 0 0 0 0
33901 - 0 0 0 0 0 0 0 0 0 0 0 0
33902 - 0 0 0 0 0 0 0 0 0 14 14 14
33903 - 46 46 46 82 82 82 2 2 6 106 106 106
33904 -170 170 170 26 26 26 86 86 86 226 226 226
33905 -123 123 123 10 10 10 14 14 14 46 46 46
33906 -231 231 231 190 190 190 6 6 6 70 70 70
33907 - 90 90 90 238 238 238 158 158 158 2 2 6
33908 - 2 2 6 2 2 6 2 2 6 2 2 6
33909 - 70 70 70 58 58 58 22 22 22 6 6 6
33910 - 0 0 0 0 0 0 0 0 0 0 0 0
33911 - 0 0 0 0 0 0 0 0 0 0 0 0
33912 - 0 0 0 0 0 0 0 0 0 0 0 0
33913 - 0 0 0 0 0 0 0 0 0 0 0 0
33914 - 0 0 0 0 0 0 0 0 0 0 0 0
33915 - 0 0 0 0 0 0 0 0 0 0 0 0
33916 - 0 0 0 0 0 0 0 0 1 0 0 0
33917 - 0 0 1 0 0 1 0 0 1 0 0 0
33918 - 0 0 0 0 0 0 0 0 0 0 0 0
33919 - 0 0 0 0 0 0 0 0 0 0 0 0
33920 - 0 0 0 0 0 0 0 0 0 0 0 0
33921 - 0 0 0 0 0 0 0 0 0 0 0 0
33922 - 0 0 0 0 0 0 0 0 0 14 14 14
33923 - 42 42 42 86 86 86 6 6 6 116 116 116
33924 -106 106 106 6 6 6 70 70 70 149 149 149
33925 -128 128 128 18 18 18 38 38 38 54 54 54
33926 -221 221 221 106 106 106 2 2 6 14 14 14
33927 - 46 46 46 190 190 190 198 198 198 2 2 6
33928 - 2 2 6 2 2 6 2 2 6 2 2 6
33929 - 74 74 74 62 62 62 22 22 22 6 6 6
33930 - 0 0 0 0 0 0 0 0 0 0 0 0
33931 - 0 0 0 0 0 0 0 0 0 0 0 0
33932 - 0 0 0 0 0 0 0 0 0 0 0 0
33933 - 0 0 0 0 0 0 0 0 0 0 0 0
33934 - 0 0 0 0 0 0 0 0 0 0 0 0
33935 - 0 0 0 0 0 0 0 0 0 0 0 0
33936 - 0 0 0 0 0 0 0 0 1 0 0 0
33937 - 0 0 1 0 0 0 0 0 1 0 0 0
33938 - 0 0 0 0 0 0 0 0 0 0 0 0
33939 - 0 0 0 0 0 0 0 0 0 0 0 0
33940 - 0 0 0 0 0 0 0 0 0 0 0 0
33941 - 0 0 0 0 0 0 0 0 0 0 0 0
33942 - 0 0 0 0 0 0 0 0 0 14 14 14
33943 - 42 42 42 94 94 94 14 14 14 101 101 101
33944 -128 128 128 2 2 6 18 18 18 116 116 116
33945 -118 98 46 121 92 8 121 92 8 98 78 10
33946 -162 162 162 106 106 106 2 2 6 2 2 6
33947 - 2 2 6 195 195 195 195 195 195 6 6 6
33948 - 2 2 6 2 2 6 2 2 6 2 2 6
33949 - 74 74 74 62 62 62 22 22 22 6 6 6
33950 - 0 0 0 0 0 0 0 0 0 0 0 0
33951 - 0 0 0 0 0 0 0 0 0 0 0 0
33952 - 0 0 0 0 0 0 0 0 0 0 0 0
33953 - 0 0 0 0 0 0 0 0 0 0 0 0
33954 - 0 0 0 0 0 0 0 0 0 0 0 0
33955 - 0 0 0 0 0 0 0 0 0 0 0 0
33956 - 0 0 0 0 0 0 0 0 1 0 0 1
33957 - 0 0 1 0 0 0 0 0 1 0 0 0
33958 - 0 0 0 0 0 0 0 0 0 0 0 0
33959 - 0 0 0 0 0 0 0 0 0 0 0 0
33960 - 0 0 0 0 0 0 0 0 0 0 0 0
33961 - 0 0 0 0 0 0 0 0 0 0 0 0
33962 - 0 0 0 0 0 0 0 0 0 10 10 10
33963 - 38 38 38 90 90 90 14 14 14 58 58 58
33964 -210 210 210 26 26 26 54 38 6 154 114 10
33965 -226 170 11 236 186 11 225 175 15 184 144 12
33966 -215 174 15 175 146 61 37 26 9 2 2 6
33967 - 70 70 70 246 246 246 138 138 138 2 2 6
33968 - 2 2 6 2 2 6 2 2 6 2 2 6
33969 - 70 70 70 66 66 66 26 26 26 6 6 6
33970 - 0 0 0 0 0 0 0 0 0 0 0 0
33971 - 0 0 0 0 0 0 0 0 0 0 0 0
33972 - 0 0 0 0 0 0 0 0 0 0 0 0
33973 - 0 0 0 0 0 0 0 0 0 0 0 0
33974 - 0 0 0 0 0 0 0 0 0 0 0 0
33975 - 0 0 0 0 0 0 0 0 0 0 0 0
33976 - 0 0 0 0 0 0 0 0 0 0 0 0
33977 - 0 0 0 0 0 0 0 0 0 0 0 0
33978 - 0 0 0 0 0 0 0 0 0 0 0 0
33979 - 0 0 0 0 0 0 0 0 0 0 0 0
33980 - 0 0 0 0 0 0 0 0 0 0 0 0
33981 - 0 0 0 0 0 0 0 0 0 0 0 0
33982 - 0 0 0 0 0 0 0 0 0 10 10 10
33983 - 38 38 38 86 86 86 14 14 14 10 10 10
33984 -195 195 195 188 164 115 192 133 9 225 175 15
33985 -239 182 13 234 190 10 232 195 16 232 200 30
33986 -245 207 45 241 208 19 232 195 16 184 144 12
33987 -218 194 134 211 206 186 42 42 42 2 2 6
33988 - 2 2 6 2 2 6 2 2 6 2 2 6
33989 - 50 50 50 74 74 74 30 30 30 6 6 6
33990 - 0 0 0 0 0 0 0 0 0 0 0 0
33991 - 0 0 0 0 0 0 0 0 0 0 0 0
33992 - 0 0 0 0 0 0 0 0 0 0 0 0
33993 - 0 0 0 0 0 0 0 0 0 0 0 0
33994 - 0 0 0 0 0 0 0 0 0 0 0 0
33995 - 0 0 0 0 0 0 0 0 0 0 0 0
33996 - 0 0 0 0 0 0 0 0 0 0 0 0
33997 - 0 0 0 0 0 0 0 0 0 0 0 0
33998 - 0 0 0 0 0 0 0 0 0 0 0 0
33999 - 0 0 0 0 0 0 0 0 0 0 0 0
34000 - 0 0 0 0 0 0 0 0 0 0 0 0
34001 - 0 0 0 0 0 0 0 0 0 0 0 0
34002 - 0 0 0 0 0 0 0 0 0 10 10 10
34003 - 34 34 34 86 86 86 14 14 14 2 2 6
34004 -121 87 25 192 133 9 219 162 10 239 182 13
34005 -236 186 11 232 195 16 241 208 19 244 214 54
34006 -246 218 60 246 218 38 246 215 20 241 208 19
34007 -241 208 19 226 184 13 121 87 25 2 2 6
34008 - 2 2 6 2 2 6 2 2 6 2 2 6
34009 - 50 50 50 82 82 82 34 34 34 10 10 10
34010 - 0 0 0 0 0 0 0 0 0 0 0 0
34011 - 0 0 0 0 0 0 0 0 0 0 0 0
34012 - 0 0 0 0 0 0 0 0 0 0 0 0
34013 - 0 0 0 0 0 0 0 0 0 0 0 0
34014 - 0 0 0 0 0 0 0 0 0 0 0 0
34015 - 0 0 0 0 0 0 0 0 0 0 0 0
34016 - 0 0 0 0 0 0 0 0 0 0 0 0
34017 - 0 0 0 0 0 0 0 0 0 0 0 0
34018 - 0 0 0 0 0 0 0 0 0 0 0 0
34019 - 0 0 0 0 0 0 0 0 0 0 0 0
34020 - 0 0 0 0 0 0 0 0 0 0 0 0
34021 - 0 0 0 0 0 0 0 0 0 0 0 0
34022 - 0 0 0 0 0 0 0 0 0 10 10 10
34023 - 34 34 34 82 82 82 30 30 30 61 42 6
34024 -180 123 7 206 145 10 230 174 11 239 182 13
34025 -234 190 10 238 202 15 241 208 19 246 218 74
34026 -246 218 38 246 215 20 246 215 20 246 215 20
34027 -226 184 13 215 174 15 184 144 12 6 6 6
34028 - 2 2 6 2 2 6 2 2 6 2 2 6
34029 - 26 26 26 94 94 94 42 42 42 14 14 14
34030 - 0 0 0 0 0 0 0 0 0 0 0 0
34031 - 0 0 0 0 0 0 0 0 0 0 0 0
34032 - 0 0 0 0 0 0 0 0 0 0 0 0
34033 - 0 0 0 0 0 0 0 0 0 0 0 0
34034 - 0 0 0 0 0 0 0 0 0 0 0 0
34035 - 0 0 0 0 0 0 0 0 0 0 0 0
34036 - 0 0 0 0 0 0 0 0 0 0 0 0
34037 - 0 0 0 0 0 0 0 0 0 0 0 0
34038 - 0 0 0 0 0 0 0 0 0 0 0 0
34039 - 0 0 0 0 0 0 0 0 0 0 0 0
34040 - 0 0 0 0 0 0 0 0 0 0 0 0
34041 - 0 0 0 0 0 0 0 0 0 0 0 0
34042 - 0 0 0 0 0 0 0 0 0 10 10 10
34043 - 30 30 30 78 78 78 50 50 50 104 69 6
34044 -192 133 9 216 158 10 236 178 12 236 186 11
34045 -232 195 16 241 208 19 244 214 54 245 215 43
34046 -246 215 20 246 215 20 241 208 19 198 155 10
34047 -200 144 11 216 158 10 156 118 10 2 2 6
34048 - 2 2 6 2 2 6 2 2 6 2 2 6
34049 - 6 6 6 90 90 90 54 54 54 18 18 18
34050 - 6 6 6 0 0 0 0 0 0 0 0 0
34051 - 0 0 0 0 0 0 0 0 0 0 0 0
34052 - 0 0 0 0 0 0 0 0 0 0 0 0
34053 - 0 0 0 0 0 0 0 0 0 0 0 0
34054 - 0 0 0 0 0 0 0 0 0 0 0 0
34055 - 0 0 0 0 0 0 0 0 0 0 0 0
34056 - 0 0 0 0 0 0 0 0 0 0 0 0
34057 - 0 0 0 0 0 0 0 0 0 0 0 0
34058 - 0 0 0 0 0 0 0 0 0 0 0 0
34059 - 0 0 0 0 0 0 0 0 0 0 0 0
34060 - 0 0 0 0 0 0 0 0 0 0 0 0
34061 - 0 0 0 0 0 0 0 0 0 0 0 0
34062 - 0 0 0 0 0 0 0 0 0 10 10 10
34063 - 30 30 30 78 78 78 46 46 46 22 22 22
34064 -137 92 6 210 162 10 239 182 13 238 190 10
34065 -238 202 15 241 208 19 246 215 20 246 215 20
34066 -241 208 19 203 166 17 185 133 11 210 150 10
34067 -216 158 10 210 150 10 102 78 10 2 2 6
34068 - 6 6 6 54 54 54 14 14 14 2 2 6
34069 - 2 2 6 62 62 62 74 74 74 30 30 30
34070 - 10 10 10 0 0 0 0 0 0 0 0 0
34071 - 0 0 0 0 0 0 0 0 0 0 0 0
34072 - 0 0 0 0 0 0 0 0 0 0 0 0
34073 - 0 0 0 0 0 0 0 0 0 0 0 0
34074 - 0 0 0 0 0 0 0 0 0 0 0 0
34075 - 0 0 0 0 0 0 0 0 0 0 0 0
34076 - 0 0 0 0 0 0 0 0 0 0 0 0
34077 - 0 0 0 0 0 0 0 0 0 0 0 0
34078 - 0 0 0 0 0 0 0 0 0 0 0 0
34079 - 0 0 0 0 0 0 0 0 0 0 0 0
34080 - 0 0 0 0 0 0 0 0 0 0 0 0
34081 - 0 0 0 0 0 0 0 0 0 0 0 0
34082 - 0 0 0 0 0 0 0 0 0 10 10 10
34083 - 34 34 34 78 78 78 50 50 50 6 6 6
34084 - 94 70 30 139 102 15 190 146 13 226 184 13
34085 -232 200 30 232 195 16 215 174 15 190 146 13
34086 -168 122 10 192 133 9 210 150 10 213 154 11
34087 -202 150 34 182 157 106 101 98 89 2 2 6
34088 - 2 2 6 78 78 78 116 116 116 58 58 58
34089 - 2 2 6 22 22 22 90 90 90 46 46 46
34090 - 18 18 18 6 6 6 0 0 0 0 0 0
34091 - 0 0 0 0 0 0 0 0 0 0 0 0
34092 - 0 0 0 0 0 0 0 0 0 0 0 0
34093 - 0 0 0 0 0 0 0 0 0 0 0 0
34094 - 0 0 0 0 0 0 0 0 0 0 0 0
34095 - 0 0 0 0 0 0 0 0 0 0 0 0
34096 - 0 0 0 0 0 0 0 0 0 0 0 0
34097 - 0 0 0 0 0 0 0 0 0 0 0 0
34098 - 0 0 0 0 0 0 0 0 0 0 0 0
34099 - 0 0 0 0 0 0 0 0 0 0 0 0
34100 - 0 0 0 0 0 0 0 0 0 0 0 0
34101 - 0 0 0 0 0 0 0 0 0 0 0 0
34102 - 0 0 0 0 0 0 0 0 0 10 10 10
34103 - 38 38 38 86 86 86 50 50 50 6 6 6
34104 -128 128 128 174 154 114 156 107 11 168 122 10
34105 -198 155 10 184 144 12 197 138 11 200 144 11
34106 -206 145 10 206 145 10 197 138 11 188 164 115
34107 -195 195 195 198 198 198 174 174 174 14 14 14
34108 - 2 2 6 22 22 22 116 116 116 116 116 116
34109 - 22 22 22 2 2 6 74 74 74 70 70 70
34110 - 30 30 30 10 10 10 0 0 0 0 0 0
34111 - 0 0 0 0 0 0 0 0 0 0 0 0
34112 - 0 0 0 0 0 0 0 0 0 0 0 0
34113 - 0 0 0 0 0 0 0 0 0 0 0 0
34114 - 0 0 0 0 0 0 0 0 0 0 0 0
34115 - 0 0 0 0 0 0 0 0 0 0 0 0
34116 - 0 0 0 0 0 0 0 0 0 0 0 0
34117 - 0 0 0 0 0 0 0 0 0 0 0 0
34118 - 0 0 0 0 0 0 0 0 0 0 0 0
34119 - 0 0 0 0 0 0 0 0 0 0 0 0
34120 - 0 0 0 0 0 0 0 0 0 0 0 0
34121 - 0 0 0 0 0 0 0 0 0 0 0 0
34122 - 0 0 0 0 0 0 6 6 6 18 18 18
34123 - 50 50 50 101 101 101 26 26 26 10 10 10
34124 -138 138 138 190 190 190 174 154 114 156 107 11
34125 -197 138 11 200 144 11 197 138 11 192 133 9
34126 -180 123 7 190 142 34 190 178 144 187 187 187
34127 -202 202 202 221 221 221 214 214 214 66 66 66
34128 - 2 2 6 2 2 6 50 50 50 62 62 62
34129 - 6 6 6 2 2 6 10 10 10 90 90 90
34130 - 50 50 50 18 18 18 6 6 6 0 0 0
34131 - 0 0 0 0 0 0 0 0 0 0 0 0
34132 - 0 0 0 0 0 0 0 0 0 0 0 0
34133 - 0 0 0 0 0 0 0 0 0 0 0 0
34134 - 0 0 0 0 0 0 0 0 0 0 0 0
34135 - 0 0 0 0 0 0 0 0 0 0 0 0
34136 - 0 0 0 0 0 0 0 0 0 0 0 0
34137 - 0 0 0 0 0 0 0 0 0 0 0 0
34138 - 0 0 0 0 0 0 0 0 0 0 0 0
34139 - 0 0 0 0 0 0 0 0 0 0 0 0
34140 - 0 0 0 0 0 0 0 0 0 0 0 0
34141 - 0 0 0 0 0 0 0 0 0 0 0 0
34142 - 0 0 0 0 0 0 10 10 10 34 34 34
34143 - 74 74 74 74 74 74 2 2 6 6 6 6
34144 -144 144 144 198 198 198 190 190 190 178 166 146
34145 -154 121 60 156 107 11 156 107 11 168 124 44
34146 -174 154 114 187 187 187 190 190 190 210 210 210
34147 -246 246 246 253 253 253 253 253 253 182 182 182
34148 - 6 6 6 2 2 6 2 2 6 2 2 6
34149 - 2 2 6 2 2 6 2 2 6 62 62 62
34150 - 74 74 74 34 34 34 14 14 14 0 0 0
34151 - 0 0 0 0 0 0 0 0 0 0 0 0
34152 - 0 0 0 0 0 0 0 0 0 0 0 0
34153 - 0 0 0 0 0 0 0 0 0 0 0 0
34154 - 0 0 0 0 0 0 0 0 0 0 0 0
34155 - 0 0 0 0 0 0 0 0 0 0 0 0
34156 - 0 0 0 0 0 0 0 0 0 0 0 0
34157 - 0 0 0 0 0 0 0 0 0 0 0 0
34158 - 0 0 0 0 0 0 0 0 0 0 0 0
34159 - 0 0 0 0 0 0 0 0 0 0 0 0
34160 - 0 0 0 0 0 0 0 0 0 0 0 0
34161 - 0 0 0 0 0 0 0 0 0 0 0 0
34162 - 0 0 0 10 10 10 22 22 22 54 54 54
34163 - 94 94 94 18 18 18 2 2 6 46 46 46
34164 -234 234 234 221 221 221 190 190 190 190 190 190
34165 -190 190 190 187 187 187 187 187 187 190 190 190
34166 -190 190 190 195 195 195 214 214 214 242 242 242
34167 -253 253 253 253 253 253 253 253 253 253 253 253
34168 - 82 82 82 2 2 6 2 2 6 2 2 6
34169 - 2 2 6 2 2 6 2 2 6 14 14 14
34170 - 86 86 86 54 54 54 22 22 22 6 6 6
34171 - 0 0 0 0 0 0 0 0 0 0 0 0
34172 - 0 0 0 0 0 0 0 0 0 0 0 0
34173 - 0 0 0 0 0 0 0 0 0 0 0 0
34174 - 0 0 0 0 0 0 0 0 0 0 0 0
34175 - 0 0 0 0 0 0 0 0 0 0 0 0
34176 - 0 0 0 0 0 0 0 0 0 0 0 0
34177 - 0 0 0 0 0 0 0 0 0 0 0 0
34178 - 0 0 0 0 0 0 0 0 0 0 0 0
34179 - 0 0 0 0 0 0 0 0 0 0 0 0
34180 - 0 0 0 0 0 0 0 0 0 0 0 0
34181 - 0 0 0 0 0 0 0 0 0 0 0 0
34182 - 6 6 6 18 18 18 46 46 46 90 90 90
34183 - 46 46 46 18 18 18 6 6 6 182 182 182
34184 -253 253 253 246 246 246 206 206 206 190 190 190
34185 -190 190 190 190 190 190 190 190 190 190 190 190
34186 -206 206 206 231 231 231 250 250 250 253 253 253
34187 -253 253 253 253 253 253 253 253 253 253 253 253
34188 -202 202 202 14 14 14 2 2 6 2 2 6
34189 - 2 2 6 2 2 6 2 2 6 2 2 6
34190 - 42 42 42 86 86 86 42 42 42 18 18 18
34191 - 6 6 6 0 0 0 0 0 0 0 0 0
34192 - 0 0 0 0 0 0 0 0 0 0 0 0
34193 - 0 0 0 0 0 0 0 0 0 0 0 0
34194 - 0 0 0 0 0 0 0 0 0 0 0 0
34195 - 0 0 0 0 0 0 0 0 0 0 0 0
34196 - 0 0 0 0 0 0 0 0 0 0 0 0
34197 - 0 0 0 0 0 0 0 0 0 0 0 0
34198 - 0 0 0 0 0 0 0 0 0 0 0 0
34199 - 0 0 0 0 0 0 0 0 0 0 0 0
34200 - 0 0 0 0 0 0 0 0 0 0 0 0
34201 - 0 0 0 0 0 0 0 0 0 6 6 6
34202 - 14 14 14 38 38 38 74 74 74 66 66 66
34203 - 2 2 6 6 6 6 90 90 90 250 250 250
34204 -253 253 253 253 253 253 238 238 238 198 198 198
34205 -190 190 190 190 190 190 195 195 195 221 221 221
34206 -246 246 246 253 253 253 253 253 253 253 253 253
34207 -253 253 253 253 253 253 253 253 253 253 253 253
34208 -253 253 253 82 82 82 2 2 6 2 2 6
34209 - 2 2 6 2 2 6 2 2 6 2 2 6
34210 - 2 2 6 78 78 78 70 70 70 34 34 34
34211 - 14 14 14 6 6 6 0 0 0 0 0 0
34212 - 0 0 0 0 0 0 0 0 0 0 0 0
34213 - 0 0 0 0 0 0 0 0 0 0 0 0
34214 - 0 0 0 0 0 0 0 0 0 0 0 0
34215 - 0 0 0 0 0 0 0 0 0 0 0 0
34216 - 0 0 0 0 0 0 0 0 0 0 0 0
34217 - 0 0 0 0 0 0 0 0 0 0 0 0
34218 - 0 0 0 0 0 0 0 0 0 0 0 0
34219 - 0 0 0 0 0 0 0 0 0 0 0 0
34220 - 0 0 0 0 0 0 0 0 0 0 0 0
34221 - 0 0 0 0 0 0 0 0 0 14 14 14
34222 - 34 34 34 66 66 66 78 78 78 6 6 6
34223 - 2 2 6 18 18 18 218 218 218 253 253 253
34224 -253 253 253 253 253 253 253 253 253 246 246 246
34225 -226 226 226 231 231 231 246 246 246 253 253 253
34226 -253 253 253 253 253 253 253 253 253 253 253 253
34227 -253 253 253 253 253 253 253 253 253 253 253 253
34228 -253 253 253 178 178 178 2 2 6 2 2 6
34229 - 2 2 6 2 2 6 2 2 6 2 2 6
34230 - 2 2 6 18 18 18 90 90 90 62 62 62
34231 - 30 30 30 10 10 10 0 0 0 0 0 0
34232 - 0 0 0 0 0 0 0 0 0 0 0 0
34233 - 0 0 0 0 0 0 0 0 0 0 0 0
34234 - 0 0 0 0 0 0 0 0 0 0 0 0
34235 - 0 0 0 0 0 0 0 0 0 0 0 0
34236 - 0 0 0 0 0 0 0 0 0 0 0 0
34237 - 0 0 0 0 0 0 0 0 0 0 0 0
34238 - 0 0 0 0 0 0 0 0 0 0 0 0
34239 - 0 0 0 0 0 0 0 0 0 0 0 0
34240 - 0 0 0 0 0 0 0 0 0 0 0 0
34241 - 0 0 0 0 0 0 10 10 10 26 26 26
34242 - 58 58 58 90 90 90 18 18 18 2 2 6
34243 - 2 2 6 110 110 110 253 253 253 253 253 253
34244 -253 253 253 253 253 253 253 253 253 253 253 253
34245 -250 250 250 253 253 253 253 253 253 253 253 253
34246 -253 253 253 253 253 253 253 253 253 253 253 253
34247 -253 253 253 253 253 253 253 253 253 253 253 253
34248 -253 253 253 231 231 231 18 18 18 2 2 6
34249 - 2 2 6 2 2 6 2 2 6 2 2 6
34250 - 2 2 6 2 2 6 18 18 18 94 94 94
34251 - 54 54 54 26 26 26 10 10 10 0 0 0
34252 - 0 0 0 0 0 0 0 0 0 0 0 0
34253 - 0 0 0 0 0 0 0 0 0 0 0 0
34254 - 0 0 0 0 0 0 0 0 0 0 0 0
34255 - 0 0 0 0 0 0 0 0 0 0 0 0
34256 - 0 0 0 0 0 0 0 0 0 0 0 0
34257 - 0 0 0 0 0 0 0 0 0 0 0 0
34258 - 0 0 0 0 0 0 0 0 0 0 0 0
34259 - 0 0 0 0 0 0 0 0 0 0 0 0
34260 - 0 0 0 0 0 0 0 0 0 0 0 0
34261 - 0 0 0 6 6 6 22 22 22 50 50 50
34262 - 90 90 90 26 26 26 2 2 6 2 2 6
34263 - 14 14 14 195 195 195 250 250 250 253 253 253
34264 -253 253 253 253 253 253 253 253 253 253 253 253
34265 -253 253 253 253 253 253 253 253 253 253 253 253
34266 -253 253 253 253 253 253 253 253 253 253 253 253
34267 -253 253 253 253 253 253 253 253 253 253 253 253
34268 -250 250 250 242 242 242 54 54 54 2 2 6
34269 - 2 2 6 2 2 6 2 2 6 2 2 6
34270 - 2 2 6 2 2 6 2 2 6 38 38 38
34271 - 86 86 86 50 50 50 22 22 22 6 6 6
34272 - 0 0 0 0 0 0 0 0 0 0 0 0
34273 - 0 0 0 0 0 0 0 0 0 0 0 0
34274 - 0 0 0 0 0 0 0 0 0 0 0 0
34275 - 0 0 0 0 0 0 0 0 0 0 0 0
34276 - 0 0 0 0 0 0 0 0 0 0 0 0
34277 - 0 0 0 0 0 0 0 0 0 0 0 0
34278 - 0 0 0 0 0 0 0 0 0 0 0 0
34279 - 0 0 0 0 0 0 0 0 0 0 0 0
34280 - 0 0 0 0 0 0 0 0 0 0 0 0
34281 - 6 6 6 14 14 14 38 38 38 82 82 82
34282 - 34 34 34 2 2 6 2 2 6 2 2 6
34283 - 42 42 42 195 195 195 246 246 246 253 253 253
34284 -253 253 253 253 253 253 253 253 253 250 250 250
34285 -242 242 242 242 242 242 250 250 250 253 253 253
34286 -253 253 253 253 253 253 253 253 253 253 253 253
34287 -253 253 253 250 250 250 246 246 246 238 238 238
34288 -226 226 226 231 231 231 101 101 101 6 6 6
34289 - 2 2 6 2 2 6 2 2 6 2 2 6
34290 - 2 2 6 2 2 6 2 2 6 2 2 6
34291 - 38 38 38 82 82 82 42 42 42 14 14 14
34292 - 6 6 6 0 0 0 0 0 0 0 0 0
34293 - 0 0 0 0 0 0 0 0 0 0 0 0
34294 - 0 0 0 0 0 0 0 0 0 0 0 0
34295 - 0 0 0 0 0 0 0 0 0 0 0 0
34296 - 0 0 0 0 0 0 0 0 0 0 0 0
34297 - 0 0 0 0 0 0 0 0 0 0 0 0
34298 - 0 0 0 0 0 0 0 0 0 0 0 0
34299 - 0 0 0 0 0 0 0 0 0 0 0 0
34300 - 0 0 0 0 0 0 0 0 0 0 0 0
34301 - 10 10 10 26 26 26 62 62 62 66 66 66
34302 - 2 2 6 2 2 6 2 2 6 6 6 6
34303 - 70 70 70 170 170 170 206 206 206 234 234 234
34304 -246 246 246 250 250 250 250 250 250 238 238 238
34305 -226 226 226 231 231 231 238 238 238 250 250 250
34306 -250 250 250 250 250 250 246 246 246 231 231 231
34307 -214 214 214 206 206 206 202 202 202 202 202 202
34308 -198 198 198 202 202 202 182 182 182 18 18 18
34309 - 2 2 6 2 2 6 2 2 6 2 2 6
34310 - 2 2 6 2 2 6 2 2 6 2 2 6
34311 - 2 2 6 62 62 62 66 66 66 30 30 30
34312 - 10 10 10 0 0 0 0 0 0 0 0 0
34313 - 0 0 0 0 0 0 0 0 0 0 0 0
34314 - 0 0 0 0 0 0 0 0 0 0 0 0
34315 - 0 0 0 0 0 0 0 0 0 0 0 0
34316 - 0 0 0 0 0 0 0 0 0 0 0 0
34317 - 0 0 0 0 0 0 0 0 0 0 0 0
34318 - 0 0 0 0 0 0 0 0 0 0 0 0
34319 - 0 0 0 0 0 0 0 0 0 0 0 0
34320 - 0 0 0 0 0 0 0 0 0 0 0 0
34321 - 14 14 14 42 42 42 82 82 82 18 18 18
34322 - 2 2 6 2 2 6 2 2 6 10 10 10
34323 - 94 94 94 182 182 182 218 218 218 242 242 242
34324 -250 250 250 253 253 253 253 253 253 250 250 250
34325 -234 234 234 253 253 253 253 253 253 253 253 253
34326 -253 253 253 253 253 253 253 253 253 246 246 246
34327 -238 238 238 226 226 226 210 210 210 202 202 202
34328 -195 195 195 195 195 195 210 210 210 158 158 158
34329 - 6 6 6 14 14 14 50 50 50 14 14 14
34330 - 2 2 6 2 2 6 2 2 6 2 2 6
34331 - 2 2 6 6 6 6 86 86 86 46 46 46
34332 - 18 18 18 6 6 6 0 0 0 0 0 0
34333 - 0 0 0 0 0 0 0 0 0 0 0 0
34334 - 0 0 0 0 0 0 0 0 0 0 0 0
34335 - 0 0 0 0 0 0 0 0 0 0 0 0
34336 - 0 0 0 0 0 0 0 0 0 0 0 0
34337 - 0 0 0 0 0 0 0 0 0 0 0 0
34338 - 0 0 0 0 0 0 0 0 0 0 0 0
34339 - 0 0 0 0 0 0 0 0 0 0 0 0
34340 - 0 0 0 0 0 0 0 0 0 6 6 6
34341 - 22 22 22 54 54 54 70 70 70 2 2 6
34342 - 2 2 6 10 10 10 2 2 6 22 22 22
34343 -166 166 166 231 231 231 250 250 250 253 253 253
34344 -253 253 253 253 253 253 253 253 253 250 250 250
34345 -242 242 242 253 253 253 253 253 253 253 253 253
34346 -253 253 253 253 253 253 253 253 253 253 253 253
34347 -253 253 253 253 253 253 253 253 253 246 246 246
34348 -231 231 231 206 206 206 198 198 198 226 226 226
34349 - 94 94 94 2 2 6 6 6 6 38 38 38
34350 - 30 30 30 2 2 6 2 2 6 2 2 6
34351 - 2 2 6 2 2 6 62 62 62 66 66 66
34352 - 26 26 26 10 10 10 0 0 0 0 0 0
34353 - 0 0 0 0 0 0 0 0 0 0 0 0
34354 - 0 0 0 0 0 0 0 0 0 0 0 0
34355 - 0 0 0 0 0 0 0 0 0 0 0 0
34356 - 0 0 0 0 0 0 0 0 0 0 0 0
34357 - 0 0 0 0 0 0 0 0 0 0 0 0
34358 - 0 0 0 0 0 0 0 0 0 0 0 0
34359 - 0 0 0 0 0 0 0 0 0 0 0 0
34360 - 0 0 0 0 0 0 0 0 0 10 10 10
34361 - 30 30 30 74 74 74 50 50 50 2 2 6
34362 - 26 26 26 26 26 26 2 2 6 106 106 106
34363 -238 238 238 253 253 253 253 253 253 253 253 253
34364 -253 253 253 253 253 253 253 253 253 253 253 253
34365 -253 253 253 253 253 253 253 253 253 253 253 253
34366 -253 253 253 253 253 253 253 253 253 253 253 253
34367 -253 253 253 253 253 253 253 253 253 253 253 253
34368 -253 253 253 246 246 246 218 218 218 202 202 202
34369 -210 210 210 14 14 14 2 2 6 2 2 6
34370 - 30 30 30 22 22 22 2 2 6 2 2 6
34371 - 2 2 6 2 2 6 18 18 18 86 86 86
34372 - 42 42 42 14 14 14 0 0 0 0 0 0
34373 - 0 0 0 0 0 0 0 0 0 0 0 0
34374 - 0 0 0 0 0 0 0 0 0 0 0 0
34375 - 0 0 0 0 0 0 0 0 0 0 0 0
34376 - 0 0 0 0 0 0 0 0 0 0 0 0
34377 - 0 0 0 0 0 0 0 0 0 0 0 0
34378 - 0 0 0 0 0 0 0 0 0 0 0 0
34379 - 0 0 0 0 0 0 0 0 0 0 0 0
34380 - 0 0 0 0 0 0 0 0 0 14 14 14
34381 - 42 42 42 90 90 90 22 22 22 2 2 6
34382 - 42 42 42 2 2 6 18 18 18 218 218 218
34383 -253 253 253 253 253 253 253 253 253 253 253 253
34384 -253 253 253 253 253 253 253 253 253 253 253 253
34385 -253 253 253 253 253 253 253 253 253 253 253 253
34386 -253 253 253 253 253 253 253 253 253 253 253 253
34387 -253 253 253 253 253 253 253 253 253 253 253 253
34388 -253 253 253 253 253 253 250 250 250 221 221 221
34389 -218 218 218 101 101 101 2 2 6 14 14 14
34390 - 18 18 18 38 38 38 10 10 10 2 2 6
34391 - 2 2 6 2 2 6 2 2 6 78 78 78
34392 - 58 58 58 22 22 22 6 6 6 0 0 0
34393 - 0 0 0 0 0 0 0 0 0 0 0 0
34394 - 0 0 0 0 0 0 0 0 0 0 0 0
34395 - 0 0 0 0 0 0 0 0 0 0 0 0
34396 - 0 0 0 0 0 0 0 0 0 0 0 0
34397 - 0 0 0 0 0 0 0 0 0 0 0 0
34398 - 0 0 0 0 0 0 0 0 0 0 0 0
34399 - 0 0 0 0 0 0 0 0 0 0 0 0
34400 - 0 0 0 0 0 0 6 6 6 18 18 18
34401 - 54 54 54 82 82 82 2 2 6 26 26 26
34402 - 22 22 22 2 2 6 123 123 123 253 253 253
34403 -253 253 253 253 253 253 253 253 253 253 253 253
34404 -253 253 253 253 253 253 253 253 253 253 253 253
34405 -253 253 253 253 253 253 253 253 253 253 253 253
34406 -253 253 253 253 253 253 253 253 253 253 253 253
34407 -253 253 253 253 253 253 253 253 253 253 253 253
34408 -253 253 253 253 253 253 253 253 253 250 250 250
34409 -238 238 238 198 198 198 6 6 6 38 38 38
34410 - 58 58 58 26 26 26 38 38 38 2 2 6
34411 - 2 2 6 2 2 6 2 2 6 46 46 46
34412 - 78 78 78 30 30 30 10 10 10 0 0 0
34413 - 0 0 0 0 0 0 0 0 0 0 0 0
34414 - 0 0 0 0 0 0 0 0 0 0 0 0
34415 - 0 0 0 0 0 0 0 0 0 0 0 0
34416 - 0 0 0 0 0 0 0 0 0 0 0 0
34417 - 0 0 0 0 0 0 0 0 0 0 0 0
34418 - 0 0 0 0 0 0 0 0 0 0 0 0
34419 - 0 0 0 0 0 0 0 0 0 0 0 0
34420 - 0 0 0 0 0 0 10 10 10 30 30 30
34421 - 74 74 74 58 58 58 2 2 6 42 42 42
34422 - 2 2 6 22 22 22 231 231 231 253 253 253
34423 -253 253 253 253 253 253 253 253 253 253 253 253
34424 -253 253 253 253 253 253 253 253 253 250 250 250
34425 -253 253 253 253 253 253 253 253 253 253 253 253
34426 -253 253 253 253 253 253 253 253 253 253 253 253
34427 -253 253 253 253 253 253 253 253 253 253 253 253
34428 -253 253 253 253 253 253 253 253 253 253 253 253
34429 -253 253 253 246 246 246 46 46 46 38 38 38
34430 - 42 42 42 14 14 14 38 38 38 14 14 14
34431 - 2 2 6 2 2 6 2 2 6 6 6 6
34432 - 86 86 86 46 46 46 14 14 14 0 0 0
34433 - 0 0 0 0 0 0 0 0 0 0 0 0
34434 - 0 0 0 0 0 0 0 0 0 0 0 0
34435 - 0 0 0 0 0 0 0 0 0 0 0 0
34436 - 0 0 0 0 0 0 0 0 0 0 0 0
34437 - 0 0 0 0 0 0 0 0 0 0 0 0
34438 - 0 0 0 0 0 0 0 0 0 0 0 0
34439 - 0 0 0 0 0 0 0 0 0 0 0 0
34440 - 0 0 0 6 6 6 14 14 14 42 42 42
34441 - 90 90 90 18 18 18 18 18 18 26 26 26
34442 - 2 2 6 116 116 116 253 253 253 253 253 253
34443 -253 253 253 253 253 253 253 253 253 253 253 253
34444 -253 253 253 253 253 253 250 250 250 238 238 238
34445 -253 253 253 253 253 253 253 253 253 253 253 253
34446 -253 253 253 253 253 253 253 253 253 253 253 253
34447 -253 253 253 253 253 253 253 253 253 253 253 253
34448 -253 253 253 253 253 253 253 253 253 253 253 253
34449 -253 253 253 253 253 253 94 94 94 6 6 6
34450 - 2 2 6 2 2 6 10 10 10 34 34 34
34451 - 2 2 6 2 2 6 2 2 6 2 2 6
34452 - 74 74 74 58 58 58 22 22 22 6 6 6
34453 - 0 0 0 0 0 0 0 0 0 0 0 0
34454 - 0 0 0 0 0 0 0 0 0 0 0 0
34455 - 0 0 0 0 0 0 0 0 0 0 0 0
34456 - 0 0 0 0 0 0 0 0 0 0 0 0
34457 - 0 0 0 0 0 0 0 0 0 0 0 0
34458 - 0 0 0 0 0 0 0 0 0 0 0 0
34459 - 0 0 0 0 0 0 0 0 0 0 0 0
34460 - 0 0 0 10 10 10 26 26 26 66 66 66
34461 - 82 82 82 2 2 6 38 38 38 6 6 6
34462 - 14 14 14 210 210 210 253 253 253 253 253 253
34463 -253 253 253 253 253 253 253 253 253 253 253 253
34464 -253 253 253 253 253 253 246 246 246 242 242 242
34465 -253 253 253 253 253 253 253 253 253 253 253 253
34466 -253 253 253 253 253 253 253 253 253 253 253 253
34467 -253 253 253 253 253 253 253 253 253 253 253 253
34468 -253 253 253 253 253 253 253 253 253 253 253 253
34469 -253 253 253 253 253 253 144 144 144 2 2 6
34470 - 2 2 6 2 2 6 2 2 6 46 46 46
34471 - 2 2 6 2 2 6 2 2 6 2 2 6
34472 - 42 42 42 74 74 74 30 30 30 10 10 10
34473 - 0 0 0 0 0 0 0 0 0 0 0 0
34474 - 0 0 0 0 0 0 0 0 0 0 0 0
34475 - 0 0 0 0 0 0 0 0 0 0 0 0
34476 - 0 0 0 0 0 0 0 0 0 0 0 0
34477 - 0 0 0 0 0 0 0 0 0 0 0 0
34478 - 0 0 0 0 0 0 0 0 0 0 0 0
34479 - 0 0 0 0 0 0 0 0 0 0 0 0
34480 - 6 6 6 14 14 14 42 42 42 90 90 90
34481 - 26 26 26 6 6 6 42 42 42 2 2 6
34482 - 74 74 74 250 250 250 253 253 253 253 253 253
34483 -253 253 253 253 253 253 253 253 253 253 253 253
34484 -253 253 253 253 253 253 242 242 242 242 242 242
34485 -253 253 253 253 253 253 253 253 253 253 253 253
34486 -253 253 253 253 253 253 253 253 253 253 253 253
34487 -253 253 253 253 253 253 253 253 253 253 253 253
34488 -253 253 253 253 253 253 253 253 253 253 253 253
34489 -253 253 253 253 253 253 182 182 182 2 2 6
34490 - 2 2 6 2 2 6 2 2 6 46 46 46
34491 - 2 2 6 2 2 6 2 2 6 2 2 6
34492 - 10 10 10 86 86 86 38 38 38 10 10 10
34493 - 0 0 0 0 0 0 0 0 0 0 0 0
34494 - 0 0 0 0 0 0 0 0 0 0 0 0
34495 - 0 0 0 0 0 0 0 0 0 0 0 0
34496 - 0 0 0 0 0 0 0 0 0 0 0 0
34497 - 0 0 0 0 0 0 0 0 0 0 0 0
34498 - 0 0 0 0 0 0 0 0 0 0 0 0
34499 - 0 0 0 0 0 0 0 0 0 0 0 0
34500 - 10 10 10 26 26 26 66 66 66 82 82 82
34501 - 2 2 6 22 22 22 18 18 18 2 2 6
34502 -149 149 149 253 253 253 253 253 253 253 253 253
34503 -253 253 253 253 253 253 253 253 253 253 253 253
34504 -253 253 253 253 253 253 234 234 234 242 242 242
34505 -253 253 253 253 253 253 253 253 253 253 253 253
34506 -253 253 253 253 253 253 253 253 253 253 253 253
34507 -253 253 253 253 253 253 253 253 253 253 253 253
34508 -253 253 253 253 253 253 253 253 253 253 253 253
34509 -253 253 253 253 253 253 206 206 206 2 2 6
34510 - 2 2 6 2 2 6 2 2 6 38 38 38
34511 - 2 2 6 2 2 6 2 2 6 2 2 6
34512 - 6 6 6 86 86 86 46 46 46 14 14 14
34513 - 0 0 0 0 0 0 0 0 0 0 0 0
34514 - 0 0 0 0 0 0 0 0 0 0 0 0
34515 - 0 0 0 0 0 0 0 0 0 0 0 0
34516 - 0 0 0 0 0 0 0 0 0 0 0 0
34517 - 0 0 0 0 0 0 0 0 0 0 0 0
34518 - 0 0 0 0 0 0 0 0 0 0 0 0
34519 - 0 0 0 0 0 0 0 0 0 6 6 6
34520 - 18 18 18 46 46 46 86 86 86 18 18 18
34521 - 2 2 6 34 34 34 10 10 10 6 6 6
34522 -210 210 210 253 253 253 253 253 253 253 253 253
34523 -253 253 253 253 253 253 253 253 253 253 253 253
34524 -253 253 253 253 253 253 234 234 234 242 242 242
34525 -253 253 253 253 253 253 253 253 253 253 253 253
34526 -253 253 253 253 253 253 253 253 253 253 253 253
34527 -253 253 253 253 253 253 253 253 253 253 253 253
34528 -253 253 253 253 253 253 253 253 253 253 253 253
34529 -253 253 253 253 253 253 221 221 221 6 6 6
34530 - 2 2 6 2 2 6 6 6 6 30 30 30
34531 - 2 2 6 2 2 6 2 2 6 2 2 6
34532 - 2 2 6 82 82 82 54 54 54 18 18 18
34533 - 6 6 6 0 0 0 0 0 0 0 0 0
34534 - 0 0 0 0 0 0 0 0 0 0 0 0
34535 - 0 0 0 0 0 0 0 0 0 0 0 0
34536 - 0 0 0 0 0 0 0 0 0 0 0 0
34537 - 0 0 0 0 0 0 0 0 0 0 0 0
34538 - 0 0 0 0 0 0 0 0 0 0 0 0
34539 - 0 0 0 0 0 0 0 0 0 10 10 10
34540 - 26 26 26 66 66 66 62 62 62 2 2 6
34541 - 2 2 6 38 38 38 10 10 10 26 26 26
34542 -238 238 238 253 253 253 253 253 253 253 253 253
34543 -253 253 253 253 253 253 253 253 253 253 253 253
34544 -253 253 253 253 253 253 231 231 231 238 238 238
34545 -253 253 253 253 253 253 253 253 253 253 253 253
34546 -253 253 253 253 253 253 253 253 253 253 253 253
34547 -253 253 253 253 253 253 253 253 253 253 253 253
34548 -253 253 253 253 253 253 253 253 253 253 253 253
34549 -253 253 253 253 253 253 231 231 231 6 6 6
34550 - 2 2 6 2 2 6 10 10 10 30 30 30
34551 - 2 2 6 2 2 6 2 2 6 2 2 6
34552 - 2 2 6 66 66 66 58 58 58 22 22 22
34553 - 6 6 6 0 0 0 0 0 0 0 0 0
34554 - 0 0 0 0 0 0 0 0 0 0 0 0
34555 - 0 0 0 0 0 0 0 0 0 0 0 0
34556 - 0 0 0 0 0 0 0 0 0 0 0 0
34557 - 0 0 0 0 0 0 0 0 0 0 0 0
34558 - 0 0 0 0 0 0 0 0 0 0 0 0
34559 - 0 0 0 0 0 0 0 0 0 10 10 10
34560 - 38 38 38 78 78 78 6 6 6 2 2 6
34561 - 2 2 6 46 46 46 14 14 14 42 42 42
34562 -246 246 246 253 253 253 253 253 253 253 253 253
34563 -253 253 253 253 253 253 253 253 253 253 253 253
34564 -253 253 253 253 253 253 231 231 231 242 242 242
34565 -253 253 253 253 253 253 253 253 253 253 253 253
34566 -253 253 253 253 253 253 253 253 253 253 253 253
34567 -253 253 253 253 253 253 253 253 253 253 253 253
34568 -253 253 253 253 253 253 253 253 253 253 253 253
34569 -253 253 253 253 253 253 234 234 234 10 10 10
34570 - 2 2 6 2 2 6 22 22 22 14 14 14
34571 - 2 2 6 2 2 6 2 2 6 2 2 6
34572 - 2 2 6 66 66 66 62 62 62 22 22 22
34573 - 6 6 6 0 0 0 0 0 0 0 0 0
34574 - 0 0 0 0 0 0 0 0 0 0 0 0
34575 - 0 0 0 0 0 0 0 0 0 0 0 0
34576 - 0 0 0 0 0 0 0 0 0 0 0 0
34577 - 0 0 0 0 0 0 0 0 0 0 0 0
34578 - 0 0 0 0 0 0 0 0 0 0 0 0
34579 - 0 0 0 0 0 0 6 6 6 18 18 18
34580 - 50 50 50 74 74 74 2 2 6 2 2 6
34581 - 14 14 14 70 70 70 34 34 34 62 62 62
34582 -250 250 250 253 253 253 253 253 253 253 253 253
34583 -253 253 253 253 253 253 253 253 253 253 253 253
34584 -253 253 253 253 253 253 231 231 231 246 246 246
34585 -253 253 253 253 253 253 253 253 253 253 253 253
34586 -253 253 253 253 253 253 253 253 253 253 253 253
34587 -253 253 253 253 253 253 253 253 253 253 253 253
34588 -253 253 253 253 253 253 253 253 253 253 253 253
34589 -253 253 253 253 253 253 234 234 234 14 14 14
34590 - 2 2 6 2 2 6 30 30 30 2 2 6
34591 - 2 2 6 2 2 6 2 2 6 2 2 6
34592 - 2 2 6 66 66 66 62 62 62 22 22 22
34593 - 6 6 6 0 0 0 0 0 0 0 0 0
34594 - 0 0 0 0 0 0 0 0 0 0 0 0
34595 - 0 0 0 0 0 0 0 0 0 0 0 0
34596 - 0 0 0 0 0 0 0 0 0 0 0 0
34597 - 0 0 0 0 0 0 0 0 0 0 0 0
34598 - 0 0 0 0 0 0 0 0 0 0 0 0
34599 - 0 0 0 0 0 0 6 6 6 18 18 18
34600 - 54 54 54 62 62 62 2 2 6 2 2 6
34601 - 2 2 6 30 30 30 46 46 46 70 70 70
34602 -250 250 250 253 253 253 253 253 253 253 253 253
34603 -253 253 253 253 253 253 253 253 253 253 253 253
34604 -253 253 253 253 253 253 231 231 231 246 246 246
34605 -253 253 253 253 253 253 253 253 253 253 253 253
34606 -253 253 253 253 253 253 253 253 253 253 253 253
34607 -253 253 253 253 253 253 253 253 253 253 253 253
34608 -253 253 253 253 253 253 253 253 253 253 253 253
34609 -253 253 253 253 253 253 226 226 226 10 10 10
34610 - 2 2 6 6 6 6 30 30 30 2 2 6
34611 - 2 2 6 2 2 6 2 2 6 2 2 6
34612 - 2 2 6 66 66 66 58 58 58 22 22 22
34613 - 6 6 6 0 0 0 0 0 0 0 0 0
34614 - 0 0 0 0 0 0 0 0 0 0 0 0
34615 - 0 0 0 0 0 0 0 0 0 0 0 0
34616 - 0 0 0 0 0 0 0 0 0 0 0 0
34617 - 0 0 0 0 0 0 0 0 0 0 0 0
34618 - 0 0 0 0 0 0 0 0 0 0 0 0
34619 - 0 0 0 0 0 0 6 6 6 22 22 22
34620 - 58 58 58 62 62 62 2 2 6 2 2 6
34621 - 2 2 6 2 2 6 30 30 30 78 78 78
34622 -250 250 250 253 253 253 253 253 253 253 253 253
34623 -253 253 253 253 253 253 253 253 253 253 253 253
34624 -253 253 253 253 253 253 231 231 231 246 246 246
34625 -253 253 253 253 253 253 253 253 253 253 253 253
34626 -253 253 253 253 253 253 253 253 253 253 253 253
34627 -253 253 253 253 253 253 253 253 253 253 253 253
34628 -253 253 253 253 253 253 253 253 253 253 253 253
34629 -253 253 253 253 253 253 206 206 206 2 2 6
34630 - 22 22 22 34 34 34 18 14 6 22 22 22
34631 - 26 26 26 18 18 18 6 6 6 2 2 6
34632 - 2 2 6 82 82 82 54 54 54 18 18 18
34633 - 6 6 6 0 0 0 0 0 0 0 0 0
34634 - 0 0 0 0 0 0 0 0 0 0 0 0
34635 - 0 0 0 0 0 0 0 0 0 0 0 0
34636 - 0 0 0 0 0 0 0 0 0 0 0 0
34637 - 0 0 0 0 0 0 0 0 0 0 0 0
34638 - 0 0 0 0 0 0 0 0 0 0 0 0
34639 - 0 0 0 0 0 0 6 6 6 26 26 26
34640 - 62 62 62 106 106 106 74 54 14 185 133 11
34641 -210 162 10 121 92 8 6 6 6 62 62 62
34642 -238 238 238 253 253 253 253 253 253 253 253 253
34643 -253 253 253 253 253 253 253 253 253 253 253 253
34644 -253 253 253 253 253 253 231 231 231 246 246 246
34645 -253 253 253 253 253 253 253 253 253 253 253 253
34646 -253 253 253 253 253 253 253 253 253 253 253 253
34647 -253 253 253 253 253 253 253 253 253 253 253 253
34648 -253 253 253 253 253 253 253 253 253 253 253 253
34649 -253 253 253 253 253 253 158 158 158 18 18 18
34650 - 14 14 14 2 2 6 2 2 6 2 2 6
34651 - 6 6 6 18 18 18 66 66 66 38 38 38
34652 - 6 6 6 94 94 94 50 50 50 18 18 18
34653 - 6 6 6 0 0 0 0 0 0 0 0 0
34654 - 0 0 0 0 0 0 0 0 0 0 0 0
34655 - 0 0 0 0 0 0 0 0 0 0 0 0
34656 - 0 0 0 0 0 0 0 0 0 0 0 0
34657 - 0 0 0 0 0 0 0 0 0 0 0 0
34658 - 0 0 0 0 0 0 0 0 0 6 6 6
34659 - 10 10 10 10 10 10 18 18 18 38 38 38
34660 - 78 78 78 142 134 106 216 158 10 242 186 14
34661 -246 190 14 246 190 14 156 118 10 10 10 10
34662 - 90 90 90 238 238 238 253 253 253 253 253 253
34663 -253 253 253 253 253 253 253 253 253 253 253 253
34664 -253 253 253 253 253 253 231 231 231 250 250 250
34665 -253 253 253 253 253 253 253 253 253 253 253 253
34666 -253 253 253 253 253 253 253 253 253 253 253 253
34667 -253 253 253 253 253 253 253 253 253 253 253 253
34668 -253 253 253 253 253 253 253 253 253 246 230 190
34669 -238 204 91 238 204 91 181 142 44 37 26 9
34670 - 2 2 6 2 2 6 2 2 6 2 2 6
34671 - 2 2 6 2 2 6 38 38 38 46 46 46
34672 - 26 26 26 106 106 106 54 54 54 18 18 18
34673 - 6 6 6 0 0 0 0 0 0 0 0 0
34674 - 0 0 0 0 0 0 0 0 0 0 0 0
34675 - 0 0 0 0 0 0 0 0 0 0 0 0
34676 - 0 0 0 0 0 0 0 0 0 0 0 0
34677 - 0 0 0 0 0 0 0 0 0 0 0 0
34678 - 0 0 0 6 6 6 14 14 14 22 22 22
34679 - 30 30 30 38 38 38 50 50 50 70 70 70
34680 -106 106 106 190 142 34 226 170 11 242 186 14
34681 -246 190 14 246 190 14 246 190 14 154 114 10
34682 - 6 6 6 74 74 74 226 226 226 253 253 253
34683 -253 253 253 253 253 253 253 253 253 253 253 253
34684 -253 253 253 253 253 253 231 231 231 250 250 250
34685 -253 253 253 253 253 253 253 253 253 253 253 253
34686 -253 253 253 253 253 253 253 253 253 253 253 253
34687 -253 253 253 253 253 253 253 253 253 253 253 253
34688 -253 253 253 253 253 253 253 253 253 228 184 62
34689 -241 196 14 241 208 19 232 195 16 38 30 10
34690 - 2 2 6 2 2 6 2 2 6 2 2 6
34691 - 2 2 6 6 6 6 30 30 30 26 26 26
34692 -203 166 17 154 142 90 66 66 66 26 26 26
34693 - 6 6 6 0 0 0 0 0 0 0 0 0
34694 - 0 0 0 0 0 0 0 0 0 0 0 0
34695 - 0 0 0 0 0 0 0 0 0 0 0 0
34696 - 0 0 0 0 0 0 0 0 0 0 0 0
34697 - 0 0 0 0 0 0 0 0 0 0 0 0
34698 - 6 6 6 18 18 18 38 38 38 58 58 58
34699 - 78 78 78 86 86 86 101 101 101 123 123 123
34700 -175 146 61 210 150 10 234 174 13 246 186 14
34701 -246 190 14 246 190 14 246 190 14 238 190 10
34702 -102 78 10 2 2 6 46 46 46 198 198 198
34703 -253 253 253 253 253 253 253 253 253 253 253 253
34704 -253 253 253 253 253 253 234 234 234 242 242 242
34705 -253 253 253 253 253 253 253 253 253 253 253 253
34706 -253 253 253 253 253 253 253 253 253 253 253 253
34707 -253 253 253 253 253 253 253 253 253 253 253 253
34708 -253 253 253 253 253 253 253 253 253 224 178 62
34709 -242 186 14 241 196 14 210 166 10 22 18 6
34710 - 2 2 6 2 2 6 2 2 6 2 2 6
34711 - 2 2 6 2 2 6 6 6 6 121 92 8
34712 -238 202 15 232 195 16 82 82 82 34 34 34
34713 - 10 10 10 0 0 0 0 0 0 0 0 0
34714 - 0 0 0 0 0 0 0 0 0 0 0 0
34715 - 0 0 0 0 0 0 0 0 0 0 0 0
34716 - 0 0 0 0 0 0 0 0 0 0 0 0
34717 - 0 0 0 0 0 0 0 0 0 0 0 0
34718 - 14 14 14 38 38 38 70 70 70 154 122 46
34719 -190 142 34 200 144 11 197 138 11 197 138 11
34720 -213 154 11 226 170 11 242 186 14 246 190 14
34721 -246 190 14 246 190 14 246 190 14 246 190 14
34722 -225 175 15 46 32 6 2 2 6 22 22 22
34723 -158 158 158 250 250 250 253 253 253 253 253 253
34724 -253 253 253 253 253 253 253 253 253 253 253 253
34725 -253 253 253 253 253 253 253 253 253 253 253 253
34726 -253 253 253 253 253 253 253 253 253 253 253 253
34727 -253 253 253 253 253 253 253 253 253 253 253 253
34728 -253 253 253 250 250 250 242 242 242 224 178 62
34729 -239 182 13 236 186 11 213 154 11 46 32 6
34730 - 2 2 6 2 2 6 2 2 6 2 2 6
34731 - 2 2 6 2 2 6 61 42 6 225 175 15
34732 -238 190 10 236 186 11 112 100 78 42 42 42
34733 - 14 14 14 0 0 0 0 0 0 0 0 0
34734 - 0 0 0 0 0 0 0 0 0 0 0 0
34735 - 0 0 0 0 0 0 0 0 0 0 0 0
34736 - 0 0 0 0 0 0 0 0 0 0 0 0
34737 - 0 0 0 0 0 0 0 0 0 6 6 6
34738 - 22 22 22 54 54 54 154 122 46 213 154 11
34739 -226 170 11 230 174 11 226 170 11 226 170 11
34740 -236 178 12 242 186 14 246 190 14 246 190 14
34741 -246 190 14 246 190 14 246 190 14 246 190 14
34742 -241 196 14 184 144 12 10 10 10 2 2 6
34743 - 6 6 6 116 116 116 242 242 242 253 253 253
34744 -253 253 253 253 253 253 253 253 253 253 253 253
34745 -253 253 253 253 253 253 253 253 253 253 253 253
34746 -253 253 253 253 253 253 253 253 253 253 253 253
34747 -253 253 253 253 253 253 253 253 253 253 253 253
34748 -253 253 253 231 231 231 198 198 198 214 170 54
34749 -236 178 12 236 178 12 210 150 10 137 92 6
34750 - 18 14 6 2 2 6 2 2 6 2 2 6
34751 - 6 6 6 70 47 6 200 144 11 236 178 12
34752 -239 182 13 239 182 13 124 112 88 58 58 58
34753 - 22 22 22 6 6 6 0 0 0 0 0 0
34754 - 0 0 0 0 0 0 0 0 0 0 0 0
34755 - 0 0 0 0 0 0 0 0 0 0 0 0
34756 - 0 0 0 0 0 0 0 0 0 0 0 0
34757 - 0 0 0 0 0 0 0 0 0 10 10 10
34758 - 30 30 30 70 70 70 180 133 36 226 170 11
34759 -239 182 13 242 186 14 242 186 14 246 186 14
34760 -246 190 14 246 190 14 246 190 14 246 190 14
34761 -246 190 14 246 190 14 246 190 14 246 190 14
34762 -246 190 14 232 195 16 98 70 6 2 2 6
34763 - 2 2 6 2 2 6 66 66 66 221 221 221
34764 -253 253 253 253 253 253 253 253 253 253 253 253
34765 -253 253 253 253 253 253 253 253 253 253 253 253
34766 -253 253 253 253 253 253 253 253 253 253 253 253
34767 -253 253 253 253 253 253 253 253 253 253 253 253
34768 -253 253 253 206 206 206 198 198 198 214 166 58
34769 -230 174 11 230 174 11 216 158 10 192 133 9
34770 -163 110 8 116 81 8 102 78 10 116 81 8
34771 -167 114 7 197 138 11 226 170 11 239 182 13
34772 -242 186 14 242 186 14 162 146 94 78 78 78
34773 - 34 34 34 14 14 14 6 6 6 0 0 0
34774 - 0 0 0 0 0 0 0 0 0 0 0 0
34775 - 0 0 0 0 0 0 0 0 0 0 0 0
34776 - 0 0 0 0 0 0 0 0 0 0 0 0
34777 - 0 0 0 0 0 0 0 0 0 6 6 6
34778 - 30 30 30 78 78 78 190 142 34 226 170 11
34779 -239 182 13 246 190 14 246 190 14 246 190 14
34780 -246 190 14 246 190 14 246 190 14 246 190 14
34781 -246 190 14 246 190 14 246 190 14 246 190 14
34782 -246 190 14 241 196 14 203 166 17 22 18 6
34783 - 2 2 6 2 2 6 2 2 6 38 38 38
34784 -218 218 218 253 253 253 253 253 253 253 253 253
34785 -253 253 253 253 253 253 253 253 253 253 253 253
34786 -253 253 253 253 253 253 253 253 253 253 253 253
34787 -253 253 253 253 253 253 253 253 253 253 253 253
34788 -250 250 250 206 206 206 198 198 198 202 162 69
34789 -226 170 11 236 178 12 224 166 10 210 150 10
34790 -200 144 11 197 138 11 192 133 9 197 138 11
34791 -210 150 10 226 170 11 242 186 14 246 190 14
34792 -246 190 14 246 186 14 225 175 15 124 112 88
34793 - 62 62 62 30 30 30 14 14 14 6 6 6
34794 - 0 0 0 0 0 0 0 0 0 0 0 0
34795 - 0 0 0 0 0 0 0 0 0 0 0 0
34796 - 0 0 0 0 0 0 0 0 0 0 0 0
34797 - 0 0 0 0 0 0 0 0 0 10 10 10
34798 - 30 30 30 78 78 78 174 135 50 224 166 10
34799 -239 182 13 246 190 14 246 190 14 246 190 14
34800 -246 190 14 246 190 14 246 190 14 246 190 14
34801 -246 190 14 246 190 14 246 190 14 246 190 14
34802 -246 190 14 246 190 14 241 196 14 139 102 15
34803 - 2 2 6 2 2 6 2 2 6 2 2 6
34804 - 78 78 78 250 250 250 253 253 253 253 253 253
34805 -253 253 253 253 253 253 253 253 253 253 253 253
34806 -253 253 253 253 253 253 253 253 253 253 253 253
34807 -253 253 253 253 253 253 253 253 253 253 253 253
34808 -250 250 250 214 214 214 198 198 198 190 150 46
34809 -219 162 10 236 178 12 234 174 13 224 166 10
34810 -216 158 10 213 154 11 213 154 11 216 158 10
34811 -226 170 11 239 182 13 246 190 14 246 190 14
34812 -246 190 14 246 190 14 242 186 14 206 162 42
34813 -101 101 101 58 58 58 30 30 30 14 14 14
34814 - 6 6 6 0 0 0 0 0 0 0 0 0
34815 - 0 0 0 0 0 0 0 0 0 0 0 0
34816 - 0 0 0 0 0 0 0 0 0 0 0 0
34817 - 0 0 0 0 0 0 0 0 0 10 10 10
34818 - 30 30 30 74 74 74 174 135 50 216 158 10
34819 -236 178 12 246 190 14 246 190 14 246 190 14
34820 -246 190 14 246 190 14 246 190 14 246 190 14
34821 -246 190 14 246 190 14 246 190 14 246 190 14
34822 -246 190 14 246 190 14 241 196 14 226 184 13
34823 - 61 42 6 2 2 6 2 2 6 2 2 6
34824 - 22 22 22 238 238 238 253 253 253 253 253 253
34825 -253 253 253 253 253 253 253 253 253 253 253 253
34826 -253 253 253 253 253 253 253 253 253 253 253 253
34827 -253 253 253 253 253 253 253 253 253 253 253 253
34828 -253 253 253 226 226 226 187 187 187 180 133 36
34829 -216 158 10 236 178 12 239 182 13 236 178 12
34830 -230 174 11 226 170 11 226 170 11 230 174 11
34831 -236 178 12 242 186 14 246 190 14 246 190 14
34832 -246 190 14 246 190 14 246 186 14 239 182 13
34833 -206 162 42 106 106 106 66 66 66 34 34 34
34834 - 14 14 14 6 6 6 0 0 0 0 0 0
34835 - 0 0 0 0 0 0 0 0 0 0 0 0
34836 - 0 0 0 0 0 0 0 0 0 0 0 0
34837 - 0 0 0 0 0 0 0 0 0 6 6 6
34838 - 26 26 26 70 70 70 163 133 67 213 154 11
34839 -236 178 12 246 190 14 246 190 14 246 190 14
34840 -246 190 14 246 190 14 246 190 14 246 190 14
34841 -246 190 14 246 190 14 246 190 14 246 190 14
34842 -246 190 14 246 190 14 246 190 14 241 196 14
34843 -190 146 13 18 14 6 2 2 6 2 2 6
34844 - 46 46 46 246 246 246 253 253 253 253 253 253
34845 -253 253 253 253 253 253 253 253 253 253 253 253
34846 -253 253 253 253 253 253 253 253 253 253 253 253
34847 -253 253 253 253 253 253 253 253 253 253 253 253
34848 -253 253 253 221 221 221 86 86 86 156 107 11
34849 -216 158 10 236 178 12 242 186 14 246 186 14
34850 -242 186 14 239 182 13 239 182 13 242 186 14
34851 -242 186 14 246 186 14 246 190 14 246 190 14
34852 -246 190 14 246 190 14 246 190 14 246 190 14
34853 -242 186 14 225 175 15 142 122 72 66 66 66
34854 - 30 30 30 10 10 10 0 0 0 0 0 0
34855 - 0 0 0 0 0 0 0 0 0 0 0 0
34856 - 0 0 0 0 0 0 0 0 0 0 0 0
34857 - 0 0 0 0 0 0 0 0 0 6 6 6
34858 - 26 26 26 70 70 70 163 133 67 210 150 10
34859 -236 178 12 246 190 14 246 190 14 246 190 14
34860 -246 190 14 246 190 14 246 190 14 246 190 14
34861 -246 190 14 246 190 14 246 190 14 246 190 14
34862 -246 190 14 246 190 14 246 190 14 246 190 14
34863 -232 195 16 121 92 8 34 34 34 106 106 106
34864 -221 221 221 253 253 253 253 253 253 253 253 253
34865 -253 253 253 253 253 253 253 253 253 253 253 253
34866 -253 253 253 253 253 253 253 253 253 253 253 253
34867 -253 253 253 253 253 253 253 253 253 253 253 253
34868 -242 242 242 82 82 82 18 14 6 163 110 8
34869 -216 158 10 236 178 12 242 186 14 246 190 14
34870 -246 190 14 246 190 14 246 190 14 246 190 14
34871 -246 190 14 246 190 14 246 190 14 246 190 14
34872 -246 190 14 246 190 14 246 190 14 246 190 14
34873 -246 190 14 246 190 14 242 186 14 163 133 67
34874 - 46 46 46 18 18 18 6 6 6 0 0 0
34875 - 0 0 0 0 0 0 0 0 0 0 0 0
34876 - 0 0 0 0 0 0 0 0 0 0 0 0
34877 - 0 0 0 0 0 0 0 0 0 10 10 10
34878 - 30 30 30 78 78 78 163 133 67 210 150 10
34879 -236 178 12 246 186 14 246 190 14 246 190 14
34880 -246 190 14 246 190 14 246 190 14 246 190 14
34881 -246 190 14 246 190 14 246 190 14 246 190 14
34882 -246 190 14 246 190 14 246 190 14 246 190 14
34883 -241 196 14 215 174 15 190 178 144 253 253 253
34884 -253 253 253 253 253 253 253 253 253 253 253 253
34885 -253 253 253 253 253 253 253 253 253 253 253 253
34886 -253 253 253 253 253 253 253 253 253 253 253 253
34887 -253 253 253 253 253 253 253 253 253 218 218 218
34888 - 58 58 58 2 2 6 22 18 6 167 114 7
34889 -216 158 10 236 178 12 246 186 14 246 190 14
34890 -246 190 14 246 190 14 246 190 14 246 190 14
34891 -246 190 14 246 190 14 246 190 14 246 190 14
34892 -246 190 14 246 190 14 246 190 14 246 190 14
34893 -246 190 14 246 186 14 242 186 14 190 150 46
34894 - 54 54 54 22 22 22 6 6 6 0 0 0
34895 - 0 0 0 0 0 0 0 0 0 0 0 0
34896 - 0 0 0 0 0 0 0 0 0 0 0 0
34897 - 0 0 0 0 0 0 0 0 0 14 14 14
34898 - 38 38 38 86 86 86 180 133 36 213 154 11
34899 -236 178 12 246 186 14 246 190 14 246 190 14
34900 -246 190 14 246 190 14 246 190 14 246 190 14
34901 -246 190 14 246 190 14 246 190 14 246 190 14
34902 -246 190 14 246 190 14 246 190 14 246 190 14
34903 -246 190 14 232 195 16 190 146 13 214 214 214
34904 -253 253 253 253 253 253 253 253 253 253 253 253
34905 -253 253 253 253 253 253 253 253 253 253 253 253
34906 -253 253 253 253 253 253 253 253 253 253 253 253
34907 -253 253 253 250 250 250 170 170 170 26 26 26
34908 - 2 2 6 2 2 6 37 26 9 163 110 8
34909 -219 162 10 239 182 13 246 186 14 246 190 14
34910 -246 190 14 246 190 14 246 190 14 246 190 14
34911 -246 190 14 246 190 14 246 190 14 246 190 14
34912 -246 190 14 246 190 14 246 190 14 246 190 14
34913 -246 186 14 236 178 12 224 166 10 142 122 72
34914 - 46 46 46 18 18 18 6 6 6 0 0 0
34915 - 0 0 0 0 0 0 0 0 0 0 0 0
34916 - 0 0 0 0 0 0 0 0 0 0 0 0
34917 - 0 0 0 0 0 0 6 6 6 18 18 18
34918 - 50 50 50 109 106 95 192 133 9 224 166 10
34919 -242 186 14 246 190 14 246 190 14 246 190 14
34920 -246 190 14 246 190 14 246 190 14 246 190 14
34921 -246 190 14 246 190 14 246 190 14 246 190 14
34922 -246 190 14 246 190 14 246 190 14 246 190 14
34923 -242 186 14 226 184 13 210 162 10 142 110 46
34924 -226 226 226 253 253 253 253 253 253 253 253 253
34925 -253 253 253 253 253 253 253 253 253 253 253 253
34926 -253 253 253 253 253 253 253 253 253 253 253 253
34927 -198 198 198 66 66 66 2 2 6 2 2 6
34928 - 2 2 6 2 2 6 50 34 6 156 107 11
34929 -219 162 10 239 182 13 246 186 14 246 190 14
34930 -246 190 14 246 190 14 246 190 14 246 190 14
34931 -246 190 14 246 190 14 246 190 14 246 190 14
34932 -246 190 14 246 190 14 246 190 14 242 186 14
34933 -234 174 13 213 154 11 154 122 46 66 66 66
34934 - 30 30 30 10 10 10 0 0 0 0 0 0
34935 - 0 0 0 0 0 0 0 0 0 0 0 0
34936 - 0 0 0 0 0 0 0 0 0 0 0 0
34937 - 0 0 0 0 0 0 6 6 6 22 22 22
34938 - 58 58 58 154 121 60 206 145 10 234 174 13
34939 -242 186 14 246 186 14 246 190 14 246 190 14
34940 -246 190 14 246 190 14 246 190 14 246 190 14
34941 -246 190 14 246 190 14 246 190 14 246 190 14
34942 -246 190 14 246 190 14 246 190 14 246 190 14
34943 -246 186 14 236 178 12 210 162 10 163 110 8
34944 - 61 42 6 138 138 138 218 218 218 250 250 250
34945 -253 253 253 253 253 253 253 253 253 250 250 250
34946 -242 242 242 210 210 210 144 144 144 66 66 66
34947 - 6 6 6 2 2 6 2 2 6 2 2 6
34948 - 2 2 6 2 2 6 61 42 6 163 110 8
34949 -216 158 10 236 178 12 246 190 14 246 190 14
34950 -246 190 14 246 190 14 246 190 14 246 190 14
34951 -246 190 14 246 190 14 246 190 14 246 190 14
34952 -246 190 14 239 182 13 230 174 11 216 158 10
34953 -190 142 34 124 112 88 70 70 70 38 38 38
34954 - 18 18 18 6 6 6 0 0 0 0 0 0
34955 - 0 0 0 0 0 0 0 0 0 0 0 0
34956 - 0 0 0 0 0 0 0 0 0 0 0 0
34957 - 0 0 0 0 0 0 6 6 6 22 22 22
34958 - 62 62 62 168 124 44 206 145 10 224 166 10
34959 -236 178 12 239 182 13 242 186 14 242 186 14
34960 -246 186 14 246 190 14 246 190 14 246 190 14
34961 -246 190 14 246 190 14 246 190 14 246 190 14
34962 -246 190 14 246 190 14 246 190 14 246 190 14
34963 -246 190 14 236 178 12 216 158 10 175 118 6
34964 - 80 54 7 2 2 6 6 6 6 30 30 30
34965 - 54 54 54 62 62 62 50 50 50 38 38 38
34966 - 14 14 14 2 2 6 2 2 6 2 2 6
34967 - 2 2 6 2 2 6 2 2 6 2 2 6
34968 - 2 2 6 6 6 6 80 54 7 167 114 7
34969 -213 154 11 236 178 12 246 190 14 246 190 14
34970 -246 190 14 246 190 14 246 190 14 246 190 14
34971 -246 190 14 242 186 14 239 182 13 239 182 13
34972 -230 174 11 210 150 10 174 135 50 124 112 88
34973 - 82 82 82 54 54 54 34 34 34 18 18 18
34974 - 6 6 6 0 0 0 0 0 0 0 0 0
34975 - 0 0 0 0 0 0 0 0 0 0 0 0
34976 - 0 0 0 0 0 0 0 0 0 0 0 0
34977 - 0 0 0 0 0 0 6 6 6 18 18 18
34978 - 50 50 50 158 118 36 192 133 9 200 144 11
34979 -216 158 10 219 162 10 224 166 10 226 170 11
34980 -230 174 11 236 178 12 239 182 13 239 182 13
34981 -242 186 14 246 186 14 246 190 14 246 190 14
34982 -246 190 14 246 190 14 246 190 14 246 190 14
34983 -246 186 14 230 174 11 210 150 10 163 110 8
34984 -104 69 6 10 10 10 2 2 6 2 2 6
34985 - 2 2 6 2 2 6 2 2 6 2 2 6
34986 - 2 2 6 2 2 6 2 2 6 2 2 6
34987 - 2 2 6 2 2 6 2 2 6 2 2 6
34988 - 2 2 6 6 6 6 91 60 6 167 114 7
34989 -206 145 10 230 174 11 242 186 14 246 190 14
34990 -246 190 14 246 190 14 246 186 14 242 186 14
34991 -239 182 13 230 174 11 224 166 10 213 154 11
34992 -180 133 36 124 112 88 86 86 86 58 58 58
34993 - 38 38 38 22 22 22 10 10 10 6 6 6
34994 - 0 0 0 0 0 0 0 0 0 0 0 0
34995 - 0 0 0 0 0 0 0 0 0 0 0 0
34996 - 0 0 0 0 0 0 0 0 0 0 0 0
34997 - 0 0 0 0 0 0 0 0 0 14 14 14
34998 - 34 34 34 70 70 70 138 110 50 158 118 36
34999 -167 114 7 180 123 7 192 133 9 197 138 11
35000 -200 144 11 206 145 10 213 154 11 219 162 10
35001 -224 166 10 230 174 11 239 182 13 242 186 14
35002 -246 186 14 246 186 14 246 186 14 246 186 14
35003 -239 182 13 216 158 10 185 133 11 152 99 6
35004 -104 69 6 18 14 6 2 2 6 2 2 6
35005 - 2 2 6 2 2 6 2 2 6 2 2 6
35006 - 2 2 6 2 2 6 2 2 6 2 2 6
35007 - 2 2 6 2 2 6 2 2 6 2 2 6
35008 - 2 2 6 6 6 6 80 54 7 152 99 6
35009 -192 133 9 219 162 10 236 178 12 239 182 13
35010 -246 186 14 242 186 14 239 182 13 236 178 12
35011 -224 166 10 206 145 10 192 133 9 154 121 60
35012 - 94 94 94 62 62 62 42 42 42 22 22 22
35013 - 14 14 14 6 6 6 0 0 0 0 0 0
35014 - 0 0 0 0 0 0 0 0 0 0 0 0
35015 - 0 0 0 0 0 0 0 0 0 0 0 0
35016 - 0 0 0 0 0 0 0 0 0 0 0 0
35017 - 0 0 0 0 0 0 0 0 0 6 6 6
35018 - 18 18 18 34 34 34 58 58 58 78 78 78
35019 -101 98 89 124 112 88 142 110 46 156 107 11
35020 -163 110 8 167 114 7 175 118 6 180 123 7
35021 -185 133 11 197 138 11 210 150 10 219 162 10
35022 -226 170 11 236 178 12 236 178 12 234 174 13
35023 -219 162 10 197 138 11 163 110 8 130 83 6
35024 - 91 60 6 10 10 10 2 2 6 2 2 6
35025 - 18 18 18 38 38 38 38 38 38 38 38 38
35026 - 38 38 38 38 38 38 38 38 38 38 38 38
35027 - 38 38 38 38 38 38 26 26 26 2 2 6
35028 - 2 2 6 6 6 6 70 47 6 137 92 6
35029 -175 118 6 200 144 11 219 162 10 230 174 11
35030 -234 174 13 230 174 11 219 162 10 210 150 10
35031 -192 133 9 163 110 8 124 112 88 82 82 82
35032 - 50 50 50 30 30 30 14 14 14 6 6 6
35033 - 0 0 0 0 0 0 0 0 0 0 0 0
35034 - 0 0 0 0 0 0 0 0 0 0 0 0
35035 - 0 0 0 0 0 0 0 0 0 0 0 0
35036 - 0 0 0 0 0 0 0 0 0 0 0 0
35037 - 0 0 0 0 0 0 0 0 0 0 0 0
35038 - 6 6 6 14 14 14 22 22 22 34 34 34
35039 - 42 42 42 58 58 58 74 74 74 86 86 86
35040 -101 98 89 122 102 70 130 98 46 121 87 25
35041 -137 92 6 152 99 6 163 110 8 180 123 7
35042 -185 133 11 197 138 11 206 145 10 200 144 11
35043 -180 123 7 156 107 11 130 83 6 104 69 6
35044 - 50 34 6 54 54 54 110 110 110 101 98 89
35045 - 86 86 86 82 82 82 78 78 78 78 78 78
35046 - 78 78 78 78 78 78 78 78 78 78 78 78
35047 - 78 78 78 82 82 82 86 86 86 94 94 94
35048 -106 106 106 101 101 101 86 66 34 124 80 6
35049 -156 107 11 180 123 7 192 133 9 200 144 11
35050 -206 145 10 200 144 11 192 133 9 175 118 6
35051 -139 102 15 109 106 95 70 70 70 42 42 42
35052 - 22 22 22 10 10 10 0 0 0 0 0 0
35053 - 0 0 0 0 0 0 0 0 0 0 0 0
35054 - 0 0 0 0 0 0 0 0 0 0 0 0
35055 - 0 0 0 0 0 0 0 0 0 0 0 0
35056 - 0 0 0 0 0 0 0 0 0 0 0 0
35057 - 0 0 0 0 0 0 0 0 0 0 0 0
35058 - 0 0 0 0 0 0 6 6 6 10 10 10
35059 - 14 14 14 22 22 22 30 30 30 38 38 38
35060 - 50 50 50 62 62 62 74 74 74 90 90 90
35061 -101 98 89 112 100 78 121 87 25 124 80 6
35062 -137 92 6 152 99 6 152 99 6 152 99 6
35063 -138 86 6 124 80 6 98 70 6 86 66 30
35064 -101 98 89 82 82 82 58 58 58 46 46 46
35065 - 38 38 38 34 34 34 34 34 34 34 34 34
35066 - 34 34 34 34 34 34 34 34 34 34 34 34
35067 - 34 34 34 34 34 34 38 38 38 42 42 42
35068 - 54 54 54 82 82 82 94 86 76 91 60 6
35069 -134 86 6 156 107 11 167 114 7 175 118 6
35070 -175 118 6 167 114 7 152 99 6 121 87 25
35071 -101 98 89 62 62 62 34 34 34 18 18 18
35072 - 6 6 6 0 0 0 0 0 0 0 0 0
35073 - 0 0 0 0 0 0 0 0 0 0 0 0
35074 - 0 0 0 0 0 0 0 0 0 0 0 0
35075 - 0 0 0 0 0 0 0 0 0 0 0 0
35076 - 0 0 0 0 0 0 0 0 0 0 0 0
35077 - 0 0 0 0 0 0 0 0 0 0 0 0
35078 - 0 0 0 0 0 0 0 0 0 0 0 0
35079 - 0 0 0 6 6 6 6 6 6 10 10 10
35080 - 18 18 18 22 22 22 30 30 30 42 42 42
35081 - 50 50 50 66 66 66 86 86 86 101 98 89
35082 -106 86 58 98 70 6 104 69 6 104 69 6
35083 -104 69 6 91 60 6 82 62 34 90 90 90
35084 - 62 62 62 38 38 38 22 22 22 14 14 14
35085 - 10 10 10 10 10 10 10 10 10 10 10 10
35086 - 10 10 10 10 10 10 6 6 6 10 10 10
35087 - 10 10 10 10 10 10 10 10 10 14 14 14
35088 - 22 22 22 42 42 42 70 70 70 89 81 66
35089 - 80 54 7 104 69 6 124 80 6 137 92 6
35090 -134 86 6 116 81 8 100 82 52 86 86 86
35091 - 58 58 58 30 30 30 14 14 14 6 6 6
35092 - 0 0 0 0 0 0 0 0 0 0 0 0
35093 - 0 0 0 0 0 0 0 0 0 0 0 0
35094 - 0 0 0 0 0 0 0 0 0 0 0 0
35095 - 0 0 0 0 0 0 0 0 0 0 0 0
35096 - 0 0 0 0 0 0 0 0 0 0 0 0
35097 - 0 0 0 0 0 0 0 0 0 0 0 0
35098 - 0 0 0 0 0 0 0 0 0 0 0 0
35099 - 0 0 0 0 0 0 0 0 0 0 0 0
35100 - 0 0 0 6 6 6 10 10 10 14 14 14
35101 - 18 18 18 26 26 26 38 38 38 54 54 54
35102 - 70 70 70 86 86 86 94 86 76 89 81 66
35103 - 89 81 66 86 86 86 74 74 74 50 50 50
35104 - 30 30 30 14 14 14 6 6 6 0 0 0
35105 - 0 0 0 0 0 0 0 0 0 0 0 0
35106 - 0 0 0 0 0 0 0 0 0 0 0 0
35107 - 0 0 0 0 0 0 0 0 0 0 0 0
35108 - 6 6 6 18 18 18 34 34 34 58 58 58
35109 - 82 82 82 89 81 66 89 81 66 89 81 66
35110 - 94 86 66 94 86 76 74 74 74 50 50 50
35111 - 26 26 26 14 14 14 6 6 6 0 0 0
35112 - 0 0 0 0 0 0 0 0 0 0 0 0
35113 - 0 0 0 0 0 0 0 0 0 0 0 0
35114 - 0 0 0 0 0 0 0 0 0 0 0 0
35115 - 0 0 0 0 0 0 0 0 0 0 0 0
35116 - 0 0 0 0 0 0 0 0 0 0 0 0
35117 - 0 0 0 0 0 0 0 0 0 0 0 0
35118 - 0 0 0 0 0 0 0 0 0 0 0 0
35119 - 0 0 0 0 0 0 0 0 0 0 0 0
35120 - 0 0 0 0 0 0 0 0 0 0 0 0
35121 - 6 6 6 6 6 6 14 14 14 18 18 18
35122 - 30 30 30 38 38 38 46 46 46 54 54 54
35123 - 50 50 50 42 42 42 30 30 30 18 18 18
35124 - 10 10 10 0 0 0 0 0 0 0 0 0
35125 - 0 0 0 0 0 0 0 0 0 0 0 0
35126 - 0 0 0 0 0 0 0 0 0 0 0 0
35127 - 0 0 0 0 0 0 0 0 0 0 0 0
35128 - 0 0 0 6 6 6 14 14 14 26 26 26
35129 - 38 38 38 50 50 50 58 58 58 58 58 58
35130 - 54 54 54 42 42 42 30 30 30 18 18 18
35131 - 10 10 10 0 0 0 0 0 0 0 0 0
35132 - 0 0 0 0 0 0 0 0 0 0 0 0
35133 - 0 0 0 0 0 0 0 0 0 0 0 0
35134 - 0 0 0 0 0 0 0 0 0 0 0 0
35135 - 0 0 0 0 0 0 0 0 0 0 0 0
35136 - 0 0 0 0 0 0 0 0 0 0 0 0
35137 - 0 0 0 0 0 0 0 0 0 0 0 0
35138 - 0 0 0 0 0 0 0 0 0 0 0 0
35139 - 0 0 0 0 0 0 0 0 0 0 0 0
35140 - 0 0 0 0 0 0 0 0 0 0 0 0
35141 - 0 0 0 0 0 0 0 0 0 6 6 6
35142 - 6 6 6 10 10 10 14 14 14 18 18 18
35143 - 18 18 18 14 14 14 10 10 10 6 6 6
35144 - 0 0 0 0 0 0 0 0 0 0 0 0
35145 - 0 0 0 0 0 0 0 0 0 0 0 0
35146 - 0 0 0 0 0 0 0 0 0 0 0 0
35147 - 0 0 0 0 0 0 0 0 0 0 0 0
35148 - 0 0 0 0 0 0 0 0 0 6 6 6
35149 - 14 14 14 18 18 18 22 22 22 22 22 22
35150 - 18 18 18 14 14 14 10 10 10 6 6 6
35151 - 0 0 0 0 0 0 0 0 0 0 0 0
35152 - 0 0 0 0 0 0 0 0 0 0 0 0
35153 - 0 0 0 0 0 0 0 0 0 0 0 0
35154 - 0 0 0 0 0 0 0 0 0 0 0 0
35155 - 0 0 0 0 0 0 0 0 0 0 0 0
35156 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35157 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35160 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35161 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35162 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35163 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35169 +4 4 4 4 4 4
35170 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35171 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35172 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35173 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35174 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35175 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35176 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35177 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35178 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35179 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35181 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35183 +4 4 4 4 4 4
35184 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35186 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35187 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35188 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35189 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35190 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35191 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35192 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35195 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35197 +4 4 4 4 4 4
35198 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35199 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35200 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35201 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35202 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35203 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35204 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35205 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35206 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35209 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35210 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35211 +4 4 4 4 4 4
35212 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35214 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35216 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35217 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35218 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35219 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35220 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35223 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35224 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35225 +4 4 4 4 4 4
35226 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35230 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35231 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35232 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35233 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35234 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35236 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35237 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35238 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35239 +4 4 4 4 4 4
35240 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35244 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
35245 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
35246 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35247 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35249 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
35250 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35251 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
35252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35253 +4 4 4 4 4 4
35254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35258 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
35259 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
35260 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35263 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
35264 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
35265 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
35266 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35267 +4 4 4 4 4 4
35268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35272 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
35273 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
35274 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35277 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
35278 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
35279 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
35280 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
35281 +4 4 4 4 4 4
35282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35285 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
35286 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
35287 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
35288 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
35289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35290 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
35291 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
35292 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
35293 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
35294 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
35295 +4 4 4 4 4 4
35296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35299 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
35300 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
35301 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
35302 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
35303 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
35304 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
35305 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
35306 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
35307 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
35308 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
35309 +4 4 4 4 4 4
35310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
35313 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
35314 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
35315 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
35316 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
35317 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
35318 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
35319 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
35320 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
35321 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
35322 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
35323 +4 4 4 4 4 4
35324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35326 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
35327 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
35328 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
35329 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
35330 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
35331 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
35332 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
35333 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
35334 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
35335 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
35336 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
35337 +4 4 4 4 4 4
35338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35340 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
35341 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
35342 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
35343 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
35344 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
35345 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
35346 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
35347 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
35348 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
35349 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
35350 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
35351 +4 4 4 4 4 4
35352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35354 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
35355 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
35356 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
35357 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
35358 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
35359 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
35360 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
35361 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
35362 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
35363 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
35364 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35365 +4 4 4 4 4 4
35366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35368 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
35369 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
35370 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
35371 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
35372 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
35373 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
35374 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
35375 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
35376 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
35377 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
35378 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
35379 +4 4 4 4 4 4
35380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35381 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
35382 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
35383 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
35384 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
35385 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
35386 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
35387 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
35388 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
35389 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
35390 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
35391 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
35392 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
35393 +4 4 4 4 4 4
35394 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35395 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
35396 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
35397 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
35398 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
35399 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
35400 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
35401 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
35402 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
35403 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
35404 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
35405 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
35406 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
35407 +0 0 0 4 4 4
35408 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
35409 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
35410 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
35411 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
35412 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
35413 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
35414 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
35415 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
35416 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
35417 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
35418 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
35419 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
35420 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
35421 +2 0 0 0 0 0
35422 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
35423 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
35424 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
35425 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
35426 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
35427 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
35428 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
35429 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
35430 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
35431 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
35432 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
35433 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
35434 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
35435 +37 38 37 0 0 0
35436 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
35437 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
35438 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
35439 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
35440 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
35441 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
35442 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
35443 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
35444 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
35445 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
35446 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
35447 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
35448 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
35449 +85 115 134 4 0 0
35450 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
35451 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
35452 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
35453 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
35454 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
35455 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
35456 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
35457 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
35458 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
35459 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
35460 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
35461 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
35462 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
35463 +60 73 81 4 0 0
35464 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
35465 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
35466 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
35467 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
35468 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
35469 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
35470 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
35471 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
35472 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
35473 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
35474 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
35475 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
35476 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
35477 +16 19 21 4 0 0
35478 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
35479 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
35480 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
35481 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
35482 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
35483 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
35484 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
35485 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
35486 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
35487 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
35488 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
35489 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
35490 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
35491 +4 0 0 4 3 3
35492 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
35493 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
35494 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
35495 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
35496 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
35497 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
35498 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
35499 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
35500 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
35501 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
35502 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
35503 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
35504 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
35505 +3 2 2 4 4 4
35506 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
35507 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
35508 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
35509 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
35510 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
35511 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
35512 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
35513 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
35514 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
35515 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
35516 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
35517 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
35518 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
35519 +4 4 4 4 4 4
35520 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
35521 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
35522 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
35523 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
35524 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
35525 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
35526 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
35527 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
35528 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
35529 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
35530 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
35531 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
35532 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
35533 +4 4 4 4 4 4
35534 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
35535 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
35536 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
35537 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
35538 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
35539 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
35540 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
35541 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
35542 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
35543 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
35544 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
35545 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
35546 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
35547 +5 5 5 5 5 5
35548 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
35549 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
35550 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
35551 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
35552 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
35553 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35554 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
35555 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
35556 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
35557 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
35558 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
35559 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
35560 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
35561 +5 5 5 4 4 4
35562 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
35563 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
35564 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
35565 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
35566 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
35567 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
35568 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
35569 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
35570 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
35571 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
35572 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
35573 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
35574 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35575 +4 4 4 4 4 4
35576 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
35577 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
35578 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
35579 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
35580 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
35581 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35582 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35583 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
35584 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
35585 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
35586 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
35587 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
35588 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35589 +4 4 4 4 4 4
35590 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
35591 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
35592 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
35593 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
35594 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
35595 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
35596 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
35597 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
35598 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
35599 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
35600 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
35601 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35602 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35603 +4 4 4 4 4 4
35604 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
35605 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
35606 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
35607 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
35608 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
35609 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35610 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35611 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
35612 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
35613 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
35614 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
35615 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35616 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35617 +4 4 4 4 4 4
35618 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
35619 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
35620 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
35621 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
35622 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
35623 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
35624 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
35625 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
35626 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
35627 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
35628 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35629 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35630 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35631 +4 4 4 4 4 4
35632 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
35633 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
35634 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
35635 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
35636 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
35637 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
35638 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
35639 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
35640 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
35641 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
35642 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
35643 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35644 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35645 +4 4 4 4 4 4
35646 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
35647 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
35648 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
35649 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
35650 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
35651 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
35652 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
35653 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
35654 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
35655 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
35656 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
35657 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35658 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35659 +4 4 4 4 4 4
35660 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
35661 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
35662 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
35663 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
35664 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
35665 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
35666 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
35667 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
35668 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
35669 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
35670 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35671 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35672 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35673 +4 4 4 4 4 4
35674 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
35675 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
35676 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
35677 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
35678 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35679 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
35680 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
35681 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
35682 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
35683 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
35684 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35685 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35686 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35687 +4 4 4 4 4 4
35688 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
35689 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
35690 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
35691 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
35692 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35693 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
35694 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
35695 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
35696 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
35697 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
35698 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35699 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35700 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35701 +4 4 4 4 4 4
35702 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
35703 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
35704 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
35705 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
35706 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35707 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
35708 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
35709 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
35710 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
35711 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35712 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35713 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35714 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35715 +4 4 4 4 4 4
35716 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
35717 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
35718 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
35719 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
35720 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
35721 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
35722 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
35723 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
35724 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35725 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35726 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35727 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35728 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35729 +4 4 4 4 4 4
35730 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
35731 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
35732 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
35733 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
35734 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35735 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
35736 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
35737 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
35738 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
35739 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35740 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35741 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35742 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35743 +4 4 4 4 4 4
35744 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
35745 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
35746 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
35747 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
35748 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
35749 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
35750 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
35751 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
35752 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35753 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35754 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35755 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35756 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35757 +4 4 4 4 4 4
35758 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
35759 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
35760 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
35761 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
35762 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
35763 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
35764 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
35765 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
35766 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
35767 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35768 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35769 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35770 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35771 +4 4 4 4 4 4
35772 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
35773 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
35774 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
35775 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
35776 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
35777 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
35778 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
35779 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
35780 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35781 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35782 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35783 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35784 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35785 +4 4 4 4 4 4
35786 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
35787 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
35788 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
35789 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
35790 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
35791 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
35792 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
35793 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
35794 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
35795 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35796 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35797 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35798 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35799 +4 4 4 4 4 4
35800 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
35801 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
35802 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
35803 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
35804 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
35805 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
35806 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
35807 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
35808 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35809 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35810 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35811 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35812 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35813 +4 4 4 4 4 4
35814 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
35815 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
35816 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
35817 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
35818 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
35819 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
35820 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
35821 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
35822 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
35823 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35824 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35825 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35826 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35827 +4 4 4 4 4 4
35828 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
35829 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
35830 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
35831 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
35832 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
35833 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
35834 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35835 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
35836 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35837 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35838 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35839 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35840 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35841 +4 4 4 4 4 4
35842 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
35843 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
35844 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
35845 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
35846 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
35847 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
35848 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35849 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
35850 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
35851 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35852 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35853 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35854 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35855 +4 4 4 4 4 4
35856 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
35857 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
35858 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
35859 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
35860 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
35861 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
35862 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
35863 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
35864 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
35865 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35866 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35867 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35868 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35869 +4 4 4 4 4 4
35870 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
35871 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
35872 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
35873 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
35874 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
35875 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
35876 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
35877 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
35878 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
35879 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35880 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35881 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35882 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35883 +4 4 4 4 4 4
35884 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
35885 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
35886 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
35887 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
35888 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
35889 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
35890 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
35891 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
35892 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
35893 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35894 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35895 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35896 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35897 +4 4 4 4 4 4
35898 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
35899 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
35900 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
35901 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
35902 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
35903 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
35904 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
35905 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
35906 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
35907 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35908 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35909 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35911 +4 4 4 4 4 4
35912 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
35913 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
35914 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
35915 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
35916 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
35917 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
35918 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
35919 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
35920 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
35921 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
35922 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35925 +4 4 4 4 4 4
35926 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
35927 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
35928 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
35929 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
35930 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
35931 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
35932 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
35933 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
35934 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
35935 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
35936 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35939 +4 4 4 4 4 4
35940 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
35941 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
35942 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
35943 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
35944 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
35945 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
35946 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35947 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
35948 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
35949 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
35950 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
35951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35953 +4 4 4 4 4 4
35954 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
35955 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
35956 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
35957 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
35958 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
35959 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
35960 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
35961 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
35962 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
35963 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
35964 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35967 +4 4 4 4 4 4
35968 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
35969 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
35970 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
35971 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
35972 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
35973 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
35974 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
35975 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
35976 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
35977 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
35978 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35981 +4 4 4 4 4 4
35982 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
35983 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
35984 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
35985 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
35986 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
35987 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
35988 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
35989 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
35990 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
35991 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
35992 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35995 +4 4 4 4 4 4
35996 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
35997 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
35998 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
35999 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
36000 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
36001 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
36002 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
36003 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
36004 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
36005 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
36006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36009 +4 4 4 4 4 4
36010 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
36011 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
36012 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
36013 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
36014 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
36015 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
36016 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
36017 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
36018 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
36019 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36023 +4 4 4 4 4 4
36024 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
36025 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
36026 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
36027 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
36028 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
36029 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
36030 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
36031 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
36032 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
36033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36037 +4 4 4 4 4 4
36038 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
36039 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
36040 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
36041 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
36042 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
36043 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
36044 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
36045 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
36046 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
36047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36051 +4 4 4 4 4 4
36052 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
36053 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
36054 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
36055 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
36056 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
36057 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
36058 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
36059 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
36060 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36065 +4 4 4 4 4 4
36066 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
36067 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
36068 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
36069 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
36070 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
36071 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
36072 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
36073 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
36074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36079 +4 4 4 4 4 4
36080 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
36081 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
36082 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
36083 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
36084 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
36085 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
36086 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
36087 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
36088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36093 +4 4 4 4 4 4
36094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36095 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
36096 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36097 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
36098 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
36099 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
36100 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
36101 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
36102 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36106 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36107 +4 4 4 4 4 4
36108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36109 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
36110 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
36111 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
36112 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
36113 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
36114 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
36115 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
36116 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36120 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36121 +4 4 4 4 4 4
36122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36123 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
36124 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
36125 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
36126 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
36127 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
36128 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
36129 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36130 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36132 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36133 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36134 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36135 +4 4 4 4 4 4
36136 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36137 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36138 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
36139 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
36140 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
36141 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
36142 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
36143 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36144 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36145 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36146 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36147 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36148 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36149 +4 4 4 4 4 4
36150 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36151 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36153 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36154 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
36155 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
36156 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
36157 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36160 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36161 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36162 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36163 +4 4 4 4 4 4
36164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36167 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
36168 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
36169 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
36170 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
36171 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36172 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36173 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36174 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36175 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36176 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36177 +4 4 4 4 4 4
36178 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36179 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36181 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
36182 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
36183 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
36184 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
36185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36186 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36187 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36188 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36189 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36190 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36191 +4 4 4 4 4 4
36192 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36195 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
36196 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
36197 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
36198 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
36199 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36200 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36201 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36202 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36203 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36204 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36205 +4 4 4 4 4 4
36206 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36209 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36210 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
36211 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36212 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36214 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36216 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36217 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36218 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36219 +4 4 4 4 4 4
36220 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36223 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36224 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
36225 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
36226 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
36227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36230 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36231 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36232 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36233 +4 4 4 4 4 4
36234 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36236 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36237 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36238 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
36239 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
36240 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36244 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36245 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36246 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36247 +4 4 4 4 4 4
36248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36251 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36252 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
36253 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
36254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36258 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36260 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36261 +4 4 4 4 4 4
36262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36266 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
36267 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
36268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36274 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36275 +4 4 4 4 4 4
36276 diff -urNp linux-3.0.4/drivers/video/udlfb.c linux-3.0.4/drivers/video/udlfb.c
36277 --- linux-3.0.4/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
36278 +++ linux-3.0.4/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
36279 @@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
36280 dlfb_urb_completion(urb);
36281
36282 error:
36283 - atomic_add(bytes_sent, &dev->bytes_sent);
36284 - atomic_add(bytes_identical, &dev->bytes_identical);
36285 - atomic_add(width*height*2, &dev->bytes_rendered);
36286 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
36287 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
36288 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
36289 end_cycles = get_cycles();
36290 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
36291 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
36292 >> 10)), /* Kcycles */
36293 &dev->cpu_kcycles_used);
36294
36295 @@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
36296 dlfb_urb_completion(urb);
36297
36298 error:
36299 - atomic_add(bytes_sent, &dev->bytes_sent);
36300 - atomic_add(bytes_identical, &dev->bytes_identical);
36301 - atomic_add(bytes_rendered, &dev->bytes_rendered);
36302 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
36303 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
36304 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
36305 end_cycles = get_cycles();
36306 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
36307 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
36308 >> 10)), /* Kcycles */
36309 &dev->cpu_kcycles_used);
36310 }
36311 @@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
36312 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36313 struct dlfb_data *dev = fb_info->par;
36314 return snprintf(buf, PAGE_SIZE, "%u\n",
36315 - atomic_read(&dev->bytes_rendered));
36316 + atomic_read_unchecked(&dev->bytes_rendered));
36317 }
36318
36319 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
36320 @@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
36321 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36322 struct dlfb_data *dev = fb_info->par;
36323 return snprintf(buf, PAGE_SIZE, "%u\n",
36324 - atomic_read(&dev->bytes_identical));
36325 + atomic_read_unchecked(&dev->bytes_identical));
36326 }
36327
36328 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
36329 @@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
36330 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36331 struct dlfb_data *dev = fb_info->par;
36332 return snprintf(buf, PAGE_SIZE, "%u\n",
36333 - atomic_read(&dev->bytes_sent));
36334 + atomic_read_unchecked(&dev->bytes_sent));
36335 }
36336
36337 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
36338 @@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
36339 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36340 struct dlfb_data *dev = fb_info->par;
36341 return snprintf(buf, PAGE_SIZE, "%u\n",
36342 - atomic_read(&dev->cpu_kcycles_used));
36343 + atomic_read_unchecked(&dev->cpu_kcycles_used));
36344 }
36345
36346 static ssize_t edid_show(
36347 @@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
36348 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36349 struct dlfb_data *dev = fb_info->par;
36350
36351 - atomic_set(&dev->bytes_rendered, 0);
36352 - atomic_set(&dev->bytes_identical, 0);
36353 - atomic_set(&dev->bytes_sent, 0);
36354 - atomic_set(&dev->cpu_kcycles_used, 0);
36355 + atomic_set_unchecked(&dev->bytes_rendered, 0);
36356 + atomic_set_unchecked(&dev->bytes_identical, 0);
36357 + atomic_set_unchecked(&dev->bytes_sent, 0);
36358 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
36359
36360 return count;
36361 }
36362 diff -urNp linux-3.0.4/drivers/video/uvesafb.c linux-3.0.4/drivers/video/uvesafb.c
36363 --- linux-3.0.4/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
36364 +++ linux-3.0.4/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
36365 @@ -19,6 +19,7 @@
36366 #include <linux/io.h>
36367 #include <linux/mutex.h>
36368 #include <linux/slab.h>
36369 +#include <linux/moduleloader.h>
36370 #include <video/edid.h>
36371 #include <video/uvesafb.h>
36372 #ifdef CONFIG_X86
36373 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
36374 NULL,
36375 };
36376
36377 - return call_usermodehelper(v86d_path, argv, envp, 1);
36378 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
36379 }
36380
36381 /*
36382 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
36383 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
36384 par->pmi_setpal = par->ypan = 0;
36385 } else {
36386 +
36387 +#ifdef CONFIG_PAX_KERNEXEC
36388 +#ifdef CONFIG_MODULES
36389 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
36390 +#endif
36391 + if (!par->pmi_code) {
36392 + par->pmi_setpal = par->ypan = 0;
36393 + return 0;
36394 + }
36395 +#endif
36396 +
36397 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
36398 + task->t.regs.edi);
36399 +
36400 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36401 + pax_open_kernel();
36402 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
36403 + pax_close_kernel();
36404 +
36405 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
36406 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
36407 +#else
36408 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
36409 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
36410 +#endif
36411 +
36412 printk(KERN_INFO "uvesafb: protected mode interface info at "
36413 "%04x:%04x\n",
36414 (u16)task->t.regs.es, (u16)task->t.regs.edi);
36415 @@ -1821,6 +1844,11 @@ out:
36416 if (par->vbe_modes)
36417 kfree(par->vbe_modes);
36418
36419 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36420 + if (par->pmi_code)
36421 + module_free_exec(NULL, par->pmi_code);
36422 +#endif
36423 +
36424 framebuffer_release(info);
36425 return err;
36426 }
36427 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
36428 kfree(par->vbe_state_orig);
36429 if (par->vbe_state_saved)
36430 kfree(par->vbe_state_saved);
36431 +
36432 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36433 + if (par->pmi_code)
36434 + module_free_exec(NULL, par->pmi_code);
36435 +#endif
36436 +
36437 }
36438
36439 framebuffer_release(info);
36440 diff -urNp linux-3.0.4/drivers/video/vesafb.c linux-3.0.4/drivers/video/vesafb.c
36441 --- linux-3.0.4/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
36442 +++ linux-3.0.4/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
36443 @@ -9,6 +9,7 @@
36444 */
36445
36446 #include <linux/module.h>
36447 +#include <linux/moduleloader.h>
36448 #include <linux/kernel.h>
36449 #include <linux/errno.h>
36450 #include <linux/string.h>
36451 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
36452 static int vram_total __initdata; /* Set total amount of memory */
36453 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
36454 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
36455 -static void (*pmi_start)(void) __read_mostly;
36456 -static void (*pmi_pal) (void) __read_mostly;
36457 +static void (*pmi_start)(void) __read_only;
36458 +static void (*pmi_pal) (void) __read_only;
36459 static int depth __read_mostly;
36460 static int vga_compat __read_mostly;
36461 /* --------------------------------------------------------------------- */
36462 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
36463 unsigned int size_vmode;
36464 unsigned int size_remap;
36465 unsigned int size_total;
36466 + void *pmi_code = NULL;
36467
36468 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
36469 return -ENODEV;
36470 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
36471 size_remap = size_total;
36472 vesafb_fix.smem_len = size_remap;
36473
36474 -#ifndef __i386__
36475 - screen_info.vesapm_seg = 0;
36476 -#endif
36477 -
36478 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
36479 printk(KERN_WARNING
36480 "vesafb: cannot reserve video memory at 0x%lx\n",
36481 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
36482 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
36483 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
36484
36485 +#ifdef __i386__
36486 +
36487 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36488 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
36489 + if (!pmi_code)
36490 +#elif !defined(CONFIG_PAX_KERNEXEC)
36491 + if (0)
36492 +#endif
36493 +
36494 +#endif
36495 + screen_info.vesapm_seg = 0;
36496 +
36497 if (screen_info.vesapm_seg) {
36498 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
36499 - screen_info.vesapm_seg,screen_info.vesapm_off);
36500 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
36501 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
36502 }
36503
36504 if (screen_info.vesapm_seg < 0xc000)
36505 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
36506
36507 if (ypan || pmi_setpal) {
36508 unsigned short *pmi_base;
36509 +
36510 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
36511 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
36512 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
36513 +
36514 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36515 + pax_open_kernel();
36516 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
36517 +#else
36518 + pmi_code = pmi_base;
36519 +#endif
36520 +
36521 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
36522 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
36523 +
36524 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36525 + pmi_start = ktva_ktla(pmi_start);
36526 + pmi_pal = ktva_ktla(pmi_pal);
36527 + pax_close_kernel();
36528 +#endif
36529 +
36530 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
36531 if (pmi_base[3]) {
36532 printk(KERN_INFO "vesafb: pmi: ports = ");
36533 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
36534 info->node, info->fix.id);
36535 return 0;
36536 err:
36537 +
36538 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36539 + module_free_exec(NULL, pmi_code);
36540 +#endif
36541 +
36542 if (info->screen_base)
36543 iounmap(info->screen_base);
36544 framebuffer_release(info);
36545 diff -urNp linux-3.0.4/drivers/video/via/via_clock.h linux-3.0.4/drivers/video/via/via_clock.h
36546 --- linux-3.0.4/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
36547 +++ linux-3.0.4/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
36548 @@ -56,7 +56,7 @@ struct via_clock {
36549
36550 void (*set_engine_pll_state)(u8 state);
36551 void (*set_engine_pll)(struct via_pll_config config);
36552 -};
36553 +} __no_const;
36554
36555
36556 static inline u32 get_pll_internal_frequency(u32 ref_freq,
36557 diff -urNp linux-3.0.4/drivers/virtio/virtio_balloon.c linux-3.0.4/drivers/virtio/virtio_balloon.c
36558 --- linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
36559 +++ linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
36560 @@ -174,6 +174,8 @@ static void update_balloon_stats(struct
36561 struct sysinfo i;
36562 int idx = 0;
36563
36564 + pax_track_stack();
36565 +
36566 all_vm_events(events);
36567 si_meminfo(&i);
36568
36569 diff -urNp linux-3.0.4/fs/9p/vfs_inode.c linux-3.0.4/fs/9p/vfs_inode.c
36570 --- linux-3.0.4/fs/9p/vfs_inode.c 2011-07-21 22:17:23.000000000 -0400
36571 +++ linux-3.0.4/fs/9p/vfs_inode.c 2011-08-23 21:47:56.000000000 -0400
36572 @@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
36573 void
36574 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
36575 {
36576 - char *s = nd_get_link(nd);
36577 + const char *s = nd_get_link(nd);
36578
36579 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
36580 IS_ERR(s) ? "<error>" : s);
36581 diff -urNp linux-3.0.4/fs/aio.c linux-3.0.4/fs/aio.c
36582 --- linux-3.0.4/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
36583 +++ linux-3.0.4/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
36584 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
36585 size += sizeof(struct io_event) * nr_events;
36586 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
36587
36588 - if (nr_pages < 0)
36589 + if (nr_pages <= 0)
36590 return -EINVAL;
36591
36592 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
36593 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
36594 struct aio_timeout to;
36595 int retry = 0;
36596
36597 + pax_track_stack();
36598 +
36599 /* needed to zero any padding within an entry (there shouldn't be
36600 * any, but C is fun!
36601 */
36602 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
36603 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
36604 {
36605 ssize_t ret;
36606 + struct iovec iovstack;
36607
36608 #ifdef CONFIG_COMPAT
36609 if (compat)
36610 ret = compat_rw_copy_check_uvector(type,
36611 (struct compat_iovec __user *)kiocb->ki_buf,
36612 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
36613 + kiocb->ki_nbytes, 1, &iovstack,
36614 &kiocb->ki_iovec);
36615 else
36616 #endif
36617 ret = rw_copy_check_uvector(type,
36618 (struct iovec __user *)kiocb->ki_buf,
36619 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
36620 + kiocb->ki_nbytes, 1, &iovstack,
36621 &kiocb->ki_iovec);
36622 if (ret < 0)
36623 goto out;
36624
36625 + if (kiocb->ki_iovec == &iovstack) {
36626 + kiocb->ki_inline_vec = iovstack;
36627 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
36628 + }
36629 kiocb->ki_nr_segs = kiocb->ki_nbytes;
36630 kiocb->ki_cur_seg = 0;
36631 /* ki_nbytes/left now reflect bytes instead of segs */
36632 diff -urNp linux-3.0.4/fs/attr.c linux-3.0.4/fs/attr.c
36633 --- linux-3.0.4/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
36634 +++ linux-3.0.4/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
36635 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
36636 unsigned long limit;
36637
36638 limit = rlimit(RLIMIT_FSIZE);
36639 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
36640 if (limit != RLIM_INFINITY && offset > limit)
36641 goto out_sig;
36642 if (offset > inode->i_sb->s_maxbytes)
36643 diff -urNp linux-3.0.4/fs/befs/linuxvfs.c linux-3.0.4/fs/befs/linuxvfs.c
36644 --- linux-3.0.4/fs/befs/linuxvfs.c 2011-08-29 23:26:13.000000000 -0400
36645 +++ linux-3.0.4/fs/befs/linuxvfs.c 2011-08-29 23:26:27.000000000 -0400
36646 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
36647 {
36648 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
36649 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
36650 - char *link = nd_get_link(nd);
36651 + const char *link = nd_get_link(nd);
36652 if (!IS_ERR(link))
36653 kfree(link);
36654 }
36655 diff -urNp linux-3.0.4/fs/binfmt_aout.c linux-3.0.4/fs/binfmt_aout.c
36656 --- linux-3.0.4/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
36657 +++ linux-3.0.4/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
36658 @@ -16,6 +16,7 @@
36659 #include <linux/string.h>
36660 #include <linux/fs.h>
36661 #include <linux/file.h>
36662 +#include <linux/security.h>
36663 #include <linux/stat.h>
36664 #include <linux/fcntl.h>
36665 #include <linux/ptrace.h>
36666 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
36667 #endif
36668 # define START_STACK(u) ((void __user *)u.start_stack)
36669
36670 + memset(&dump, 0, sizeof(dump));
36671 +
36672 fs = get_fs();
36673 set_fs(KERNEL_DS);
36674 has_dumped = 1;
36675 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
36676
36677 /* If the size of the dump file exceeds the rlimit, then see what would happen
36678 if we wrote the stack, but not the data area. */
36679 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
36680 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
36681 dump.u_dsize = 0;
36682
36683 /* Make sure we have enough room to write the stack and data areas. */
36684 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
36685 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
36686 dump.u_ssize = 0;
36687
36688 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
36689 rlim = rlimit(RLIMIT_DATA);
36690 if (rlim >= RLIM_INFINITY)
36691 rlim = ~0;
36692 +
36693 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
36694 if (ex.a_data + ex.a_bss > rlim)
36695 return -ENOMEM;
36696
36697 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
36698 install_exec_creds(bprm);
36699 current->flags &= ~PF_FORKNOEXEC;
36700
36701 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36702 + current->mm->pax_flags = 0UL;
36703 +#endif
36704 +
36705 +#ifdef CONFIG_PAX_PAGEEXEC
36706 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
36707 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
36708 +
36709 +#ifdef CONFIG_PAX_EMUTRAMP
36710 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
36711 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
36712 +#endif
36713 +
36714 +#ifdef CONFIG_PAX_MPROTECT
36715 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
36716 + current->mm->pax_flags |= MF_PAX_MPROTECT;
36717 +#endif
36718 +
36719 + }
36720 +#endif
36721 +
36722 if (N_MAGIC(ex) == OMAGIC) {
36723 unsigned long text_addr, map_size;
36724 loff_t pos;
36725 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
36726
36727 down_write(&current->mm->mmap_sem);
36728 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
36729 - PROT_READ | PROT_WRITE | PROT_EXEC,
36730 + PROT_READ | PROT_WRITE,
36731 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
36732 fd_offset + ex.a_text);
36733 up_write(&current->mm->mmap_sem);
36734 diff -urNp linux-3.0.4/fs/binfmt_elf.c linux-3.0.4/fs/binfmt_elf.c
36735 --- linux-3.0.4/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
36736 +++ linux-3.0.4/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
36737 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
36738 #define elf_core_dump NULL
36739 #endif
36740
36741 +#ifdef CONFIG_PAX_MPROTECT
36742 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
36743 +#endif
36744 +
36745 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
36746 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
36747 #else
36748 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
36749 .load_binary = load_elf_binary,
36750 .load_shlib = load_elf_library,
36751 .core_dump = elf_core_dump,
36752 +
36753 +#ifdef CONFIG_PAX_MPROTECT
36754 + .handle_mprotect= elf_handle_mprotect,
36755 +#endif
36756 +
36757 .min_coredump = ELF_EXEC_PAGESIZE,
36758 };
36759
36760 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
36761
36762 static int set_brk(unsigned long start, unsigned long end)
36763 {
36764 + unsigned long e = end;
36765 +
36766 start = ELF_PAGEALIGN(start);
36767 end = ELF_PAGEALIGN(end);
36768 if (end > start) {
36769 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
36770 if (BAD_ADDR(addr))
36771 return addr;
36772 }
36773 - current->mm->start_brk = current->mm->brk = end;
36774 + current->mm->start_brk = current->mm->brk = e;
36775 return 0;
36776 }
36777
36778 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
36779 elf_addr_t __user *u_rand_bytes;
36780 const char *k_platform = ELF_PLATFORM;
36781 const char *k_base_platform = ELF_BASE_PLATFORM;
36782 - unsigned char k_rand_bytes[16];
36783 + u32 k_rand_bytes[4];
36784 int items;
36785 elf_addr_t *elf_info;
36786 int ei_index = 0;
36787 const struct cred *cred = current_cred();
36788 struct vm_area_struct *vma;
36789 + unsigned long saved_auxv[AT_VECTOR_SIZE];
36790 +
36791 + pax_track_stack();
36792
36793 /*
36794 * In some cases (e.g. Hyper-Threading), we want to avoid L1
36795 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
36796 * Generate 16 random bytes for userspace PRNG seeding.
36797 */
36798 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
36799 - u_rand_bytes = (elf_addr_t __user *)
36800 - STACK_ALLOC(p, sizeof(k_rand_bytes));
36801 + srandom32(k_rand_bytes[0] ^ random32());
36802 + srandom32(k_rand_bytes[1] ^ random32());
36803 + srandom32(k_rand_bytes[2] ^ random32());
36804 + srandom32(k_rand_bytes[3] ^ random32());
36805 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
36806 + u_rand_bytes = (elf_addr_t __user *) p;
36807 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
36808 return -EFAULT;
36809
36810 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
36811 return -EFAULT;
36812 current->mm->env_end = p;
36813
36814 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
36815 +
36816 /* Put the elf_info on the stack in the right place. */
36817 sp = (elf_addr_t __user *)envp + 1;
36818 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
36819 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
36820 return -EFAULT;
36821 return 0;
36822 }
36823 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
36824 {
36825 struct elf_phdr *elf_phdata;
36826 struct elf_phdr *eppnt;
36827 - unsigned long load_addr = 0;
36828 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
36829 int load_addr_set = 0;
36830 unsigned long last_bss = 0, elf_bss = 0;
36831 - unsigned long error = ~0UL;
36832 + unsigned long error = -EINVAL;
36833 unsigned long total_size;
36834 int retval, i, size;
36835
36836 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
36837 goto out_close;
36838 }
36839
36840 +#ifdef CONFIG_PAX_SEGMEXEC
36841 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36842 + pax_task_size = SEGMEXEC_TASK_SIZE;
36843 +#endif
36844 +
36845 eppnt = elf_phdata;
36846 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36847 if (eppnt->p_type == PT_LOAD) {
36848 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
36849 k = load_addr + eppnt->p_vaddr;
36850 if (BAD_ADDR(k) ||
36851 eppnt->p_filesz > eppnt->p_memsz ||
36852 - eppnt->p_memsz > TASK_SIZE ||
36853 - TASK_SIZE - eppnt->p_memsz < k) {
36854 + eppnt->p_memsz > pax_task_size ||
36855 + pax_task_size - eppnt->p_memsz < k) {
36856 error = -ENOMEM;
36857 goto out_close;
36858 }
36859 @@ -528,6 +553,193 @@ out:
36860 return error;
36861 }
36862
36863 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36864 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36865 +{
36866 + unsigned long pax_flags = 0UL;
36867 +
36868 +#ifdef CONFIG_PAX_PAGEEXEC
36869 + if (elf_phdata->p_flags & PF_PAGEEXEC)
36870 + pax_flags |= MF_PAX_PAGEEXEC;
36871 +#endif
36872 +
36873 +#ifdef CONFIG_PAX_SEGMEXEC
36874 + if (elf_phdata->p_flags & PF_SEGMEXEC)
36875 + pax_flags |= MF_PAX_SEGMEXEC;
36876 +#endif
36877 +
36878 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36879 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36880 + if ((__supported_pte_mask & _PAGE_NX))
36881 + pax_flags &= ~MF_PAX_SEGMEXEC;
36882 + else
36883 + pax_flags &= ~MF_PAX_PAGEEXEC;
36884 + }
36885 +#endif
36886 +
36887 +#ifdef CONFIG_PAX_EMUTRAMP
36888 + if (elf_phdata->p_flags & PF_EMUTRAMP)
36889 + pax_flags |= MF_PAX_EMUTRAMP;
36890 +#endif
36891 +
36892 +#ifdef CONFIG_PAX_MPROTECT
36893 + if (elf_phdata->p_flags & PF_MPROTECT)
36894 + pax_flags |= MF_PAX_MPROTECT;
36895 +#endif
36896 +
36897 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36898 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36899 + pax_flags |= MF_PAX_RANDMMAP;
36900 +#endif
36901 +
36902 + return pax_flags;
36903 +}
36904 +#endif
36905 +
36906 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36907 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36908 +{
36909 + unsigned long pax_flags = 0UL;
36910 +
36911 +#ifdef CONFIG_PAX_PAGEEXEC
36912 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36913 + pax_flags |= MF_PAX_PAGEEXEC;
36914 +#endif
36915 +
36916 +#ifdef CONFIG_PAX_SEGMEXEC
36917 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36918 + pax_flags |= MF_PAX_SEGMEXEC;
36919 +#endif
36920 +
36921 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36922 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36923 + if ((__supported_pte_mask & _PAGE_NX))
36924 + pax_flags &= ~MF_PAX_SEGMEXEC;
36925 + else
36926 + pax_flags &= ~MF_PAX_PAGEEXEC;
36927 + }
36928 +#endif
36929 +
36930 +#ifdef CONFIG_PAX_EMUTRAMP
36931 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36932 + pax_flags |= MF_PAX_EMUTRAMP;
36933 +#endif
36934 +
36935 +#ifdef CONFIG_PAX_MPROTECT
36936 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36937 + pax_flags |= MF_PAX_MPROTECT;
36938 +#endif
36939 +
36940 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36941 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36942 + pax_flags |= MF_PAX_RANDMMAP;
36943 +#endif
36944 +
36945 + return pax_flags;
36946 +}
36947 +#endif
36948 +
36949 +#ifdef CONFIG_PAX_EI_PAX
36950 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36951 +{
36952 + unsigned long pax_flags = 0UL;
36953 +
36954 +#ifdef CONFIG_PAX_PAGEEXEC
36955 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36956 + pax_flags |= MF_PAX_PAGEEXEC;
36957 +#endif
36958 +
36959 +#ifdef CONFIG_PAX_SEGMEXEC
36960 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36961 + pax_flags |= MF_PAX_SEGMEXEC;
36962 +#endif
36963 +
36964 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36965 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36966 + if ((__supported_pte_mask & _PAGE_NX))
36967 + pax_flags &= ~MF_PAX_SEGMEXEC;
36968 + else
36969 + pax_flags &= ~MF_PAX_PAGEEXEC;
36970 + }
36971 +#endif
36972 +
36973 +#ifdef CONFIG_PAX_EMUTRAMP
36974 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36975 + pax_flags |= MF_PAX_EMUTRAMP;
36976 +#endif
36977 +
36978 +#ifdef CONFIG_PAX_MPROTECT
36979 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36980 + pax_flags |= MF_PAX_MPROTECT;
36981 +#endif
36982 +
36983 +#ifdef CONFIG_PAX_ASLR
36984 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36985 + pax_flags |= MF_PAX_RANDMMAP;
36986 +#endif
36987 +
36988 + return pax_flags;
36989 +}
36990 +#endif
36991 +
36992 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36993 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
36994 +{
36995 + unsigned long pax_flags = 0UL;
36996 +
36997 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36998 + unsigned long i;
36999 + int found_flags = 0;
37000 +#endif
37001 +
37002 +#ifdef CONFIG_PAX_EI_PAX
37003 + pax_flags = pax_parse_ei_pax(elf_ex);
37004 +#endif
37005 +
37006 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
37007 + for (i = 0UL; i < elf_ex->e_phnum; i++)
37008 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
37009 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
37010 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
37011 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
37012 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
37013 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
37014 + return -EINVAL;
37015 +
37016 +#ifdef CONFIG_PAX_SOFTMODE
37017 + if (pax_softmode)
37018 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
37019 + else
37020 +#endif
37021 +
37022 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
37023 + found_flags = 1;
37024 + break;
37025 + }
37026 +#endif
37027 +
37028 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
37029 + if (found_flags == 0) {
37030 + struct elf_phdr phdr;
37031 + memset(&phdr, 0, sizeof(phdr));
37032 + phdr.p_flags = PF_NOEMUTRAMP;
37033 +#ifdef CONFIG_PAX_SOFTMODE
37034 + if (pax_softmode)
37035 + pax_flags = pax_parse_softmode(&phdr);
37036 + else
37037 +#endif
37038 + pax_flags = pax_parse_hardmode(&phdr);
37039 + }
37040 +#endif
37041 +
37042 + if (0 > pax_check_flags(&pax_flags))
37043 + return -EINVAL;
37044 +
37045 + current->mm->pax_flags = pax_flags;
37046 + return 0;
37047 +}
37048 +#endif
37049 +
37050 /*
37051 * These are the functions used to load ELF style executables and shared
37052 * libraries. There is no binary dependent code anywhere else.
37053 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
37054 {
37055 unsigned int random_variable = 0;
37056
37057 +#ifdef CONFIG_PAX_RANDUSTACK
37058 + if (randomize_va_space)
37059 + return stack_top - current->mm->delta_stack;
37060 +#endif
37061 +
37062 if ((current->flags & PF_RANDOMIZE) &&
37063 !(current->personality & ADDR_NO_RANDOMIZE)) {
37064 random_variable = get_random_int() & STACK_RND_MASK;
37065 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
37066 unsigned long load_addr = 0, load_bias = 0;
37067 int load_addr_set = 0;
37068 char * elf_interpreter = NULL;
37069 - unsigned long error;
37070 + unsigned long error = 0;
37071 struct elf_phdr *elf_ppnt, *elf_phdata;
37072 unsigned long elf_bss, elf_brk;
37073 int retval, i;
37074 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
37075 unsigned long start_code, end_code, start_data, end_data;
37076 unsigned long reloc_func_desc __maybe_unused = 0;
37077 int executable_stack = EXSTACK_DEFAULT;
37078 - unsigned long def_flags = 0;
37079 struct {
37080 struct elfhdr elf_ex;
37081 struct elfhdr interp_elf_ex;
37082 } *loc;
37083 + unsigned long pax_task_size = TASK_SIZE;
37084
37085 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
37086 if (!loc) {
37087 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
37088
37089 /* OK, This is the point of no return */
37090 current->flags &= ~PF_FORKNOEXEC;
37091 - current->mm->def_flags = def_flags;
37092 +
37093 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
37094 + current->mm->pax_flags = 0UL;
37095 +#endif
37096 +
37097 +#ifdef CONFIG_PAX_DLRESOLVE
37098 + current->mm->call_dl_resolve = 0UL;
37099 +#endif
37100 +
37101 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
37102 + current->mm->call_syscall = 0UL;
37103 +#endif
37104 +
37105 +#ifdef CONFIG_PAX_ASLR
37106 + current->mm->delta_mmap = 0UL;
37107 + current->mm->delta_stack = 0UL;
37108 +#endif
37109 +
37110 + current->mm->def_flags = 0;
37111 +
37112 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
37113 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
37114 + send_sig(SIGKILL, current, 0);
37115 + goto out_free_dentry;
37116 + }
37117 +#endif
37118 +
37119 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
37120 + pax_set_initial_flags(bprm);
37121 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
37122 + if (pax_set_initial_flags_func)
37123 + (pax_set_initial_flags_func)(bprm);
37124 +#endif
37125 +
37126 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
37127 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
37128 + current->mm->context.user_cs_limit = PAGE_SIZE;
37129 + current->mm->def_flags |= VM_PAGEEXEC;
37130 + }
37131 +#endif
37132 +
37133 +#ifdef CONFIG_PAX_SEGMEXEC
37134 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
37135 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
37136 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
37137 + pax_task_size = SEGMEXEC_TASK_SIZE;
37138 + current->mm->def_flags |= VM_NOHUGEPAGE;
37139 + }
37140 +#endif
37141 +
37142 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
37143 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37144 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
37145 + put_cpu();
37146 + }
37147 +#endif
37148
37149 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
37150 may depend on the personality. */
37151 SET_PERSONALITY(loc->elf_ex);
37152 +
37153 +#ifdef CONFIG_PAX_ASLR
37154 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
37155 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
37156 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
37157 + }
37158 +#endif
37159 +
37160 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37161 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37162 + executable_stack = EXSTACK_DISABLE_X;
37163 + current->personality &= ~READ_IMPLIES_EXEC;
37164 + } else
37165 +#endif
37166 +
37167 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
37168 current->personality |= READ_IMPLIES_EXEC;
37169
37170 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
37171 #else
37172 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
37173 #endif
37174 +
37175 +#ifdef CONFIG_PAX_RANDMMAP
37176 + /* PaX: randomize base address at the default exe base if requested */
37177 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
37178 +#ifdef CONFIG_SPARC64
37179 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
37180 +#else
37181 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
37182 +#endif
37183 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
37184 + elf_flags |= MAP_FIXED;
37185 + }
37186 +#endif
37187 +
37188 }
37189
37190 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
37191 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
37192 * allowed task size. Note that p_filesz must always be
37193 * <= p_memsz so it is only necessary to check p_memsz.
37194 */
37195 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37196 - elf_ppnt->p_memsz > TASK_SIZE ||
37197 - TASK_SIZE - elf_ppnt->p_memsz < k) {
37198 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37199 + elf_ppnt->p_memsz > pax_task_size ||
37200 + pax_task_size - elf_ppnt->p_memsz < k) {
37201 /* set_brk can never work. Avoid overflows. */
37202 send_sig(SIGKILL, current, 0);
37203 retval = -EINVAL;
37204 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
37205 start_data += load_bias;
37206 end_data += load_bias;
37207
37208 +#ifdef CONFIG_PAX_RANDMMAP
37209 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
37210 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
37211 +#endif
37212 +
37213 /* Calling set_brk effectively mmaps the pages that we need
37214 * for the bss and break sections. We must do this before
37215 * mapping in the interpreter, to make sure it doesn't wind
37216 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
37217 goto out_free_dentry;
37218 }
37219 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
37220 - send_sig(SIGSEGV, current, 0);
37221 - retval = -EFAULT; /* Nobody gets to see this, but.. */
37222 - goto out_free_dentry;
37223 + /*
37224 + * This bss-zeroing can fail if the ELF
37225 + * file specifies odd protections. So
37226 + * we don't check the return value
37227 + */
37228 }
37229
37230 if (elf_interpreter) {
37231 @@ -1090,7 +1398,7 @@ out:
37232 * Decide what to dump of a segment, part, all or none.
37233 */
37234 static unsigned long vma_dump_size(struct vm_area_struct *vma,
37235 - unsigned long mm_flags)
37236 + unsigned long mm_flags, long signr)
37237 {
37238 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
37239
37240 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
37241 if (vma->vm_file == NULL)
37242 return 0;
37243
37244 - if (FILTER(MAPPED_PRIVATE))
37245 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
37246 goto whole;
37247
37248 /*
37249 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
37250 {
37251 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
37252 int i = 0;
37253 - do
37254 + do {
37255 i += 2;
37256 - while (auxv[i - 2] != AT_NULL);
37257 + } while (auxv[i - 2] != AT_NULL);
37258 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
37259 }
37260
37261 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
37262 }
37263
37264 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
37265 - unsigned long mm_flags)
37266 + struct coredump_params *cprm)
37267 {
37268 struct vm_area_struct *vma;
37269 size_t size = 0;
37270
37271 for (vma = first_vma(current, gate_vma); vma != NULL;
37272 vma = next_vma(vma, gate_vma))
37273 - size += vma_dump_size(vma, mm_flags);
37274 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37275 return size;
37276 }
37277
37278 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
37279
37280 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
37281
37282 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
37283 + offset += elf_core_vma_data_size(gate_vma, cprm);
37284 offset += elf_core_extra_data_size();
37285 e_shoff = offset;
37286
37287 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
37288 offset = dataoff;
37289
37290 size += sizeof(*elf);
37291 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
37292 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
37293 goto end_coredump;
37294
37295 size += sizeof(*phdr4note);
37296 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
37297 if (size > cprm->limit
37298 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
37299 goto end_coredump;
37300 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
37301 phdr.p_offset = offset;
37302 phdr.p_vaddr = vma->vm_start;
37303 phdr.p_paddr = 0;
37304 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
37305 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37306 phdr.p_memsz = vma->vm_end - vma->vm_start;
37307 offset += phdr.p_filesz;
37308 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
37309 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
37310 phdr.p_align = ELF_EXEC_PAGESIZE;
37311
37312 size += sizeof(phdr);
37313 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
37314 if (size > cprm->limit
37315 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
37316 goto end_coredump;
37317 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
37318 unsigned long addr;
37319 unsigned long end;
37320
37321 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
37322 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37323
37324 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
37325 struct page *page;
37326 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
37327 page = get_dump_page(addr);
37328 if (page) {
37329 void *kaddr = kmap(page);
37330 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
37331 stop = ((size += PAGE_SIZE) > cprm->limit) ||
37332 !dump_write(cprm->file, kaddr,
37333 PAGE_SIZE);
37334 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
37335
37336 if (e_phnum == PN_XNUM) {
37337 size += sizeof(*shdr4extnum);
37338 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
37339 if (size > cprm->limit
37340 || !dump_write(cprm->file, shdr4extnum,
37341 sizeof(*shdr4extnum)))
37342 @@ -2067,6 +2380,97 @@ out:
37343
37344 #endif /* CONFIG_ELF_CORE */
37345
37346 +#ifdef CONFIG_PAX_MPROTECT
37347 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
37348 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
37349 + * we'll remove VM_MAYWRITE for good on RELRO segments.
37350 + *
37351 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
37352 + * basis because we want to allow the common case and not the special ones.
37353 + */
37354 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
37355 +{
37356 + struct elfhdr elf_h;
37357 + struct elf_phdr elf_p;
37358 + unsigned long i;
37359 + unsigned long oldflags;
37360 + bool is_textrel_rw, is_textrel_rx, is_relro;
37361 +
37362 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
37363 + return;
37364 +
37365 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
37366 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
37367 +
37368 +#ifdef CONFIG_PAX_ELFRELOCS
37369 + /* possible TEXTREL */
37370 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
37371 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
37372 +#else
37373 + is_textrel_rw = false;
37374 + is_textrel_rx = false;
37375 +#endif
37376 +
37377 + /* possible RELRO */
37378 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
37379 +
37380 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
37381 + return;
37382 +
37383 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
37384 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
37385 +
37386 +#ifdef CONFIG_PAX_ETEXECRELOCS
37387 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37388 +#else
37389 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
37390 +#endif
37391 +
37392 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37393 + !elf_check_arch(&elf_h) ||
37394 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
37395 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
37396 + return;
37397 +
37398 + for (i = 0UL; i < elf_h.e_phnum; i++) {
37399 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
37400 + return;
37401 + switch (elf_p.p_type) {
37402 + case PT_DYNAMIC:
37403 + if (!is_textrel_rw && !is_textrel_rx)
37404 + continue;
37405 + i = 0UL;
37406 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
37407 + elf_dyn dyn;
37408 +
37409 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
37410 + return;
37411 + if (dyn.d_tag == DT_NULL)
37412 + return;
37413 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
37414 + gr_log_textrel(vma);
37415 + if (is_textrel_rw)
37416 + vma->vm_flags |= VM_MAYWRITE;
37417 + else
37418 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
37419 + vma->vm_flags &= ~VM_MAYWRITE;
37420 + return;
37421 + }
37422 + i++;
37423 + }
37424 + return;
37425 +
37426 + case PT_GNU_RELRO:
37427 + if (!is_relro)
37428 + continue;
37429 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
37430 + vma->vm_flags &= ~VM_MAYWRITE;
37431 + return;
37432 + }
37433 + }
37434 +}
37435 +#endif
37436 +
37437 static int __init init_elf_binfmt(void)
37438 {
37439 return register_binfmt(&elf_format);
37440 diff -urNp linux-3.0.4/fs/binfmt_flat.c linux-3.0.4/fs/binfmt_flat.c
37441 --- linux-3.0.4/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
37442 +++ linux-3.0.4/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
37443 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
37444 realdatastart = (unsigned long) -ENOMEM;
37445 printk("Unable to allocate RAM for process data, errno %d\n",
37446 (int)-realdatastart);
37447 + down_write(&current->mm->mmap_sem);
37448 do_munmap(current->mm, textpos, text_len);
37449 + up_write(&current->mm->mmap_sem);
37450 ret = realdatastart;
37451 goto err;
37452 }
37453 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
37454 }
37455 if (IS_ERR_VALUE(result)) {
37456 printk("Unable to read data+bss, errno %d\n", (int)-result);
37457 + down_write(&current->mm->mmap_sem);
37458 do_munmap(current->mm, textpos, text_len);
37459 do_munmap(current->mm, realdatastart, len);
37460 + up_write(&current->mm->mmap_sem);
37461 ret = result;
37462 goto err;
37463 }
37464 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
37465 }
37466 if (IS_ERR_VALUE(result)) {
37467 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
37468 + down_write(&current->mm->mmap_sem);
37469 do_munmap(current->mm, textpos, text_len + data_len + extra +
37470 MAX_SHARED_LIBS * sizeof(unsigned long));
37471 + up_write(&current->mm->mmap_sem);
37472 ret = result;
37473 goto err;
37474 }
37475 diff -urNp linux-3.0.4/fs/bio.c linux-3.0.4/fs/bio.c
37476 --- linux-3.0.4/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
37477 +++ linux-3.0.4/fs/bio.c 2011-08-23 21:47:56.000000000 -0400
37478 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
37479 const int read = bio_data_dir(bio) == READ;
37480 struct bio_map_data *bmd = bio->bi_private;
37481 int i;
37482 - char *p = bmd->sgvecs[0].iov_base;
37483 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
37484
37485 __bio_for_each_segment(bvec, bio, i, 0) {
37486 char *addr = page_address(bvec->bv_page);
37487 diff -urNp linux-3.0.4/fs/block_dev.c linux-3.0.4/fs/block_dev.c
37488 --- linux-3.0.4/fs/block_dev.c 2011-07-21 22:17:23.000000000 -0400
37489 +++ linux-3.0.4/fs/block_dev.c 2011-08-23 21:47:56.000000000 -0400
37490 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
37491 else if (bdev->bd_contains == bdev)
37492 return true; /* is a whole device which isn't held */
37493
37494 - else if (whole->bd_holder == bd_may_claim)
37495 + else if (whole->bd_holder == (void *)bd_may_claim)
37496 return true; /* is a partition of a device that is being partitioned */
37497 else if (whole->bd_holder != NULL)
37498 return false; /* is a partition of a held device */
37499 diff -urNp linux-3.0.4/fs/btrfs/ctree.c linux-3.0.4/fs/btrfs/ctree.c
37500 --- linux-3.0.4/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
37501 +++ linux-3.0.4/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
37502 @@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
37503 free_extent_buffer(buf);
37504 add_root_to_dirty_list(root);
37505 } else {
37506 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
37507 - parent_start = parent->start;
37508 - else
37509 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
37510 + if (parent)
37511 + parent_start = parent->start;
37512 + else
37513 + parent_start = 0;
37514 + } else
37515 parent_start = 0;
37516
37517 WARN_ON(trans->transid != btrfs_header_generation(parent));
37518 diff -urNp linux-3.0.4/fs/btrfs/inode.c linux-3.0.4/fs/btrfs/inode.c
37519 --- linux-3.0.4/fs/btrfs/inode.c 2011-07-21 22:17:23.000000000 -0400
37520 +++ linux-3.0.4/fs/btrfs/inode.c 2011-08-23 21:48:14.000000000 -0400
37521 @@ -6895,7 +6895,7 @@ fail:
37522 return -ENOMEM;
37523 }
37524
37525 -static int btrfs_getattr(struct vfsmount *mnt,
37526 +int btrfs_getattr(struct vfsmount *mnt,
37527 struct dentry *dentry, struct kstat *stat)
37528 {
37529 struct inode *inode = dentry->d_inode;
37530 @@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
37531 return 0;
37532 }
37533
37534 +EXPORT_SYMBOL(btrfs_getattr);
37535 +
37536 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
37537 +{
37538 + return BTRFS_I(inode)->root->anon_super.s_dev;
37539 +}
37540 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
37541 +
37542 /*
37543 * If a file is moved, it will inherit the cow and compression flags of the new
37544 * directory.
37545 diff -urNp linux-3.0.4/fs/btrfs/ioctl.c linux-3.0.4/fs/btrfs/ioctl.c
37546 --- linux-3.0.4/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
37547 +++ linux-3.0.4/fs/btrfs/ioctl.c 2011-08-23 21:48:14.000000000 -0400
37548 @@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
37549 for (i = 0; i < num_types; i++) {
37550 struct btrfs_space_info *tmp;
37551
37552 + /* Don't copy in more than we allocated */
37553 if (!slot_count)
37554 break;
37555
37556 + slot_count--;
37557 +
37558 info = NULL;
37559 rcu_read_lock();
37560 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
37561 @@ -2700,10 +2703,7 @@ long btrfs_ioctl_space_info(struct btrfs
37562 memcpy(dest, &space, sizeof(space));
37563 dest++;
37564 space_args.total_spaces++;
37565 - slot_count--;
37566 }
37567 - if (!slot_count)
37568 - break;
37569 }
37570 up_read(&info->groups_sem);
37571 }
37572 diff -urNp linux-3.0.4/fs/btrfs/relocation.c linux-3.0.4/fs/btrfs/relocation.c
37573 --- linux-3.0.4/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
37574 +++ linux-3.0.4/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
37575 @@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
37576 }
37577 spin_unlock(&rc->reloc_root_tree.lock);
37578
37579 - BUG_ON((struct btrfs_root *)node->data != root);
37580 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
37581
37582 if (!del) {
37583 spin_lock(&rc->reloc_root_tree.lock);
37584 diff -urNp linux-3.0.4/fs/cachefiles/bind.c linux-3.0.4/fs/cachefiles/bind.c
37585 --- linux-3.0.4/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
37586 +++ linux-3.0.4/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
37587 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
37588 args);
37589
37590 /* start by checking things over */
37591 - ASSERT(cache->fstop_percent >= 0 &&
37592 - cache->fstop_percent < cache->fcull_percent &&
37593 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
37594 cache->fcull_percent < cache->frun_percent &&
37595 cache->frun_percent < 100);
37596
37597 - ASSERT(cache->bstop_percent >= 0 &&
37598 - cache->bstop_percent < cache->bcull_percent &&
37599 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
37600 cache->bcull_percent < cache->brun_percent &&
37601 cache->brun_percent < 100);
37602
37603 diff -urNp linux-3.0.4/fs/cachefiles/daemon.c linux-3.0.4/fs/cachefiles/daemon.c
37604 --- linux-3.0.4/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
37605 +++ linux-3.0.4/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
37606 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
37607 if (n > buflen)
37608 return -EMSGSIZE;
37609
37610 - if (copy_to_user(_buffer, buffer, n) != 0)
37611 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
37612 return -EFAULT;
37613
37614 return n;
37615 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
37616 if (test_bit(CACHEFILES_DEAD, &cache->flags))
37617 return -EIO;
37618
37619 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
37620 + if (datalen > PAGE_SIZE - 1)
37621 return -EOPNOTSUPP;
37622
37623 /* drag the command string into the kernel so we can parse it */
37624 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
37625 if (args[0] != '%' || args[1] != '\0')
37626 return -EINVAL;
37627
37628 - if (fstop < 0 || fstop >= cache->fcull_percent)
37629 + if (fstop >= cache->fcull_percent)
37630 return cachefiles_daemon_range_error(cache, args);
37631
37632 cache->fstop_percent = fstop;
37633 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
37634 if (args[0] != '%' || args[1] != '\0')
37635 return -EINVAL;
37636
37637 - if (bstop < 0 || bstop >= cache->bcull_percent)
37638 + if (bstop >= cache->bcull_percent)
37639 return cachefiles_daemon_range_error(cache, args);
37640
37641 cache->bstop_percent = bstop;
37642 diff -urNp linux-3.0.4/fs/cachefiles/internal.h linux-3.0.4/fs/cachefiles/internal.h
37643 --- linux-3.0.4/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
37644 +++ linux-3.0.4/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
37645 @@ -57,7 +57,7 @@ struct cachefiles_cache {
37646 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
37647 struct rb_root active_nodes; /* active nodes (can't be culled) */
37648 rwlock_t active_lock; /* lock for active_nodes */
37649 - atomic_t gravecounter; /* graveyard uniquifier */
37650 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
37651 unsigned frun_percent; /* when to stop culling (% files) */
37652 unsigned fcull_percent; /* when to start culling (% files) */
37653 unsigned fstop_percent; /* when to stop allocating (% files) */
37654 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
37655 * proc.c
37656 */
37657 #ifdef CONFIG_CACHEFILES_HISTOGRAM
37658 -extern atomic_t cachefiles_lookup_histogram[HZ];
37659 -extern atomic_t cachefiles_mkdir_histogram[HZ];
37660 -extern atomic_t cachefiles_create_histogram[HZ];
37661 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37662 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37663 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
37664
37665 extern int __init cachefiles_proc_init(void);
37666 extern void cachefiles_proc_cleanup(void);
37667 static inline
37668 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
37669 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
37670 {
37671 unsigned long jif = jiffies - start_jif;
37672 if (jif >= HZ)
37673 jif = HZ - 1;
37674 - atomic_inc(&histogram[jif]);
37675 + atomic_inc_unchecked(&histogram[jif]);
37676 }
37677
37678 #else
37679 diff -urNp linux-3.0.4/fs/cachefiles/namei.c linux-3.0.4/fs/cachefiles/namei.c
37680 --- linux-3.0.4/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
37681 +++ linux-3.0.4/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
37682 @@ -318,7 +318,7 @@ try_again:
37683 /* first step is to make up a grave dentry in the graveyard */
37684 sprintf(nbuffer, "%08x%08x",
37685 (uint32_t) get_seconds(),
37686 - (uint32_t) atomic_inc_return(&cache->gravecounter));
37687 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
37688
37689 /* do the multiway lock magic */
37690 trap = lock_rename(cache->graveyard, dir);
37691 diff -urNp linux-3.0.4/fs/cachefiles/proc.c linux-3.0.4/fs/cachefiles/proc.c
37692 --- linux-3.0.4/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
37693 +++ linux-3.0.4/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
37694 @@ -14,9 +14,9 @@
37695 #include <linux/seq_file.h>
37696 #include "internal.h"
37697
37698 -atomic_t cachefiles_lookup_histogram[HZ];
37699 -atomic_t cachefiles_mkdir_histogram[HZ];
37700 -atomic_t cachefiles_create_histogram[HZ];
37701 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37702 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37703 +atomic_unchecked_t cachefiles_create_histogram[HZ];
37704
37705 /*
37706 * display the latency histogram
37707 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
37708 return 0;
37709 default:
37710 index = (unsigned long) v - 3;
37711 - x = atomic_read(&cachefiles_lookup_histogram[index]);
37712 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
37713 - z = atomic_read(&cachefiles_create_histogram[index]);
37714 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
37715 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
37716 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
37717 if (x == 0 && y == 0 && z == 0)
37718 return 0;
37719
37720 diff -urNp linux-3.0.4/fs/cachefiles/rdwr.c linux-3.0.4/fs/cachefiles/rdwr.c
37721 --- linux-3.0.4/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
37722 +++ linux-3.0.4/fs/cachefiles/rdwr.c 2011-08-23 21:47:56.000000000 -0400
37723 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
37724 old_fs = get_fs();
37725 set_fs(KERNEL_DS);
37726 ret = file->f_op->write(
37727 - file, (const void __user *) data, len, &pos);
37728 + file, (__force const void __user *) data, len, &pos);
37729 set_fs(old_fs);
37730 kunmap(page);
37731 if (ret != len)
37732 diff -urNp linux-3.0.4/fs/ceph/dir.c linux-3.0.4/fs/ceph/dir.c
37733 --- linux-3.0.4/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
37734 +++ linux-3.0.4/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
37735 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
37736 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
37737 struct ceph_mds_client *mdsc = fsc->mdsc;
37738 unsigned frag = fpos_frag(filp->f_pos);
37739 - int off = fpos_off(filp->f_pos);
37740 + unsigned int off = fpos_off(filp->f_pos);
37741 int err;
37742 u32 ftype;
37743 struct ceph_mds_reply_info_parsed *rinfo;
37744 diff -urNp linux-3.0.4/fs/cifs/cifs_debug.c linux-3.0.4/fs/cifs/cifs_debug.c
37745 --- linux-3.0.4/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
37746 +++ linux-3.0.4/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
37747 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
37748
37749 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
37750 #ifdef CONFIG_CIFS_STATS2
37751 - atomic_set(&totBufAllocCount, 0);
37752 - atomic_set(&totSmBufAllocCount, 0);
37753 + atomic_set_unchecked(&totBufAllocCount, 0);
37754 + atomic_set_unchecked(&totSmBufAllocCount, 0);
37755 #endif /* CONFIG_CIFS_STATS2 */
37756 spin_lock(&cifs_tcp_ses_lock);
37757 list_for_each(tmp1, &cifs_tcp_ses_list) {
37758 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
37759 tcon = list_entry(tmp3,
37760 struct cifs_tcon,
37761 tcon_list);
37762 - atomic_set(&tcon->num_smbs_sent, 0);
37763 - atomic_set(&tcon->num_writes, 0);
37764 - atomic_set(&tcon->num_reads, 0);
37765 - atomic_set(&tcon->num_oplock_brks, 0);
37766 - atomic_set(&tcon->num_opens, 0);
37767 - atomic_set(&tcon->num_posixopens, 0);
37768 - atomic_set(&tcon->num_posixmkdirs, 0);
37769 - atomic_set(&tcon->num_closes, 0);
37770 - atomic_set(&tcon->num_deletes, 0);
37771 - atomic_set(&tcon->num_mkdirs, 0);
37772 - atomic_set(&tcon->num_rmdirs, 0);
37773 - atomic_set(&tcon->num_renames, 0);
37774 - atomic_set(&tcon->num_t2renames, 0);
37775 - atomic_set(&tcon->num_ffirst, 0);
37776 - atomic_set(&tcon->num_fnext, 0);
37777 - atomic_set(&tcon->num_fclose, 0);
37778 - atomic_set(&tcon->num_hardlinks, 0);
37779 - atomic_set(&tcon->num_symlinks, 0);
37780 - atomic_set(&tcon->num_locks, 0);
37781 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37782 + atomic_set_unchecked(&tcon->num_writes, 0);
37783 + atomic_set_unchecked(&tcon->num_reads, 0);
37784 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37785 + atomic_set_unchecked(&tcon->num_opens, 0);
37786 + atomic_set_unchecked(&tcon->num_posixopens, 0);
37787 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37788 + atomic_set_unchecked(&tcon->num_closes, 0);
37789 + atomic_set_unchecked(&tcon->num_deletes, 0);
37790 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
37791 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
37792 + atomic_set_unchecked(&tcon->num_renames, 0);
37793 + atomic_set_unchecked(&tcon->num_t2renames, 0);
37794 + atomic_set_unchecked(&tcon->num_ffirst, 0);
37795 + atomic_set_unchecked(&tcon->num_fnext, 0);
37796 + atomic_set_unchecked(&tcon->num_fclose, 0);
37797 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
37798 + atomic_set_unchecked(&tcon->num_symlinks, 0);
37799 + atomic_set_unchecked(&tcon->num_locks, 0);
37800 }
37801 }
37802 }
37803 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
37804 smBufAllocCount.counter, cifs_min_small);
37805 #ifdef CONFIG_CIFS_STATS2
37806 seq_printf(m, "Total Large %d Small %d Allocations\n",
37807 - atomic_read(&totBufAllocCount),
37808 - atomic_read(&totSmBufAllocCount));
37809 + atomic_read_unchecked(&totBufAllocCount),
37810 + atomic_read_unchecked(&totSmBufAllocCount));
37811 #endif /* CONFIG_CIFS_STATS2 */
37812
37813 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
37814 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
37815 if (tcon->need_reconnect)
37816 seq_puts(m, "\tDISCONNECTED ");
37817 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37818 - atomic_read(&tcon->num_smbs_sent),
37819 - atomic_read(&tcon->num_oplock_brks));
37820 + atomic_read_unchecked(&tcon->num_smbs_sent),
37821 + atomic_read_unchecked(&tcon->num_oplock_brks));
37822 seq_printf(m, "\nReads: %d Bytes: %lld",
37823 - atomic_read(&tcon->num_reads),
37824 + atomic_read_unchecked(&tcon->num_reads),
37825 (long long)(tcon->bytes_read));
37826 seq_printf(m, "\nWrites: %d Bytes: %lld",
37827 - atomic_read(&tcon->num_writes),
37828 + atomic_read_unchecked(&tcon->num_writes),
37829 (long long)(tcon->bytes_written));
37830 seq_printf(m, "\nFlushes: %d",
37831 - atomic_read(&tcon->num_flushes));
37832 + atomic_read_unchecked(&tcon->num_flushes));
37833 seq_printf(m, "\nLocks: %d HardLinks: %d "
37834 "Symlinks: %d",
37835 - atomic_read(&tcon->num_locks),
37836 - atomic_read(&tcon->num_hardlinks),
37837 - atomic_read(&tcon->num_symlinks));
37838 + atomic_read_unchecked(&tcon->num_locks),
37839 + atomic_read_unchecked(&tcon->num_hardlinks),
37840 + atomic_read_unchecked(&tcon->num_symlinks));
37841 seq_printf(m, "\nOpens: %d Closes: %d "
37842 "Deletes: %d",
37843 - atomic_read(&tcon->num_opens),
37844 - atomic_read(&tcon->num_closes),
37845 - atomic_read(&tcon->num_deletes));
37846 + atomic_read_unchecked(&tcon->num_opens),
37847 + atomic_read_unchecked(&tcon->num_closes),
37848 + atomic_read_unchecked(&tcon->num_deletes));
37849 seq_printf(m, "\nPosix Opens: %d "
37850 "Posix Mkdirs: %d",
37851 - atomic_read(&tcon->num_posixopens),
37852 - atomic_read(&tcon->num_posixmkdirs));
37853 + atomic_read_unchecked(&tcon->num_posixopens),
37854 + atomic_read_unchecked(&tcon->num_posixmkdirs));
37855 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37856 - atomic_read(&tcon->num_mkdirs),
37857 - atomic_read(&tcon->num_rmdirs));
37858 + atomic_read_unchecked(&tcon->num_mkdirs),
37859 + atomic_read_unchecked(&tcon->num_rmdirs));
37860 seq_printf(m, "\nRenames: %d T2 Renames %d",
37861 - atomic_read(&tcon->num_renames),
37862 - atomic_read(&tcon->num_t2renames));
37863 + atomic_read_unchecked(&tcon->num_renames),
37864 + atomic_read_unchecked(&tcon->num_t2renames));
37865 seq_printf(m, "\nFindFirst: %d FNext %d "
37866 "FClose %d",
37867 - atomic_read(&tcon->num_ffirst),
37868 - atomic_read(&tcon->num_fnext),
37869 - atomic_read(&tcon->num_fclose));
37870 + atomic_read_unchecked(&tcon->num_ffirst),
37871 + atomic_read_unchecked(&tcon->num_fnext),
37872 + atomic_read_unchecked(&tcon->num_fclose));
37873 }
37874 }
37875 }
37876 diff -urNp linux-3.0.4/fs/cifs/cifsfs.c linux-3.0.4/fs/cifs/cifsfs.c
37877 --- linux-3.0.4/fs/cifs/cifsfs.c 2011-08-23 21:44:40.000000000 -0400
37878 +++ linux-3.0.4/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
37879 @@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
37880 cifs_req_cachep = kmem_cache_create("cifs_request",
37881 CIFSMaxBufSize +
37882 MAX_CIFS_HDR_SIZE, 0,
37883 - SLAB_HWCACHE_ALIGN, NULL);
37884 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
37885 if (cifs_req_cachep == NULL)
37886 return -ENOMEM;
37887
37888 @@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
37889 efficient to alloc 1 per page off the slab compared to 17K (5page)
37890 alloc of large cifs buffers even when page debugging is on */
37891 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
37892 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
37893 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
37894 NULL);
37895 if (cifs_sm_req_cachep == NULL) {
37896 mempool_destroy(cifs_req_poolp);
37897 @@ -1106,8 +1106,8 @@ init_cifs(void)
37898 atomic_set(&bufAllocCount, 0);
37899 atomic_set(&smBufAllocCount, 0);
37900 #ifdef CONFIG_CIFS_STATS2
37901 - atomic_set(&totBufAllocCount, 0);
37902 - atomic_set(&totSmBufAllocCount, 0);
37903 + atomic_set_unchecked(&totBufAllocCount, 0);
37904 + atomic_set_unchecked(&totSmBufAllocCount, 0);
37905 #endif /* CONFIG_CIFS_STATS2 */
37906
37907 atomic_set(&midCount, 0);
37908 diff -urNp linux-3.0.4/fs/cifs/cifsglob.h linux-3.0.4/fs/cifs/cifsglob.h
37909 --- linux-3.0.4/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
37910 +++ linux-3.0.4/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
37911 @@ -381,28 +381,28 @@ struct cifs_tcon {
37912 __u16 Flags; /* optional support bits */
37913 enum statusEnum tidStatus;
37914 #ifdef CONFIG_CIFS_STATS
37915 - atomic_t num_smbs_sent;
37916 - atomic_t num_writes;
37917 - atomic_t num_reads;
37918 - atomic_t num_flushes;
37919 - atomic_t num_oplock_brks;
37920 - atomic_t num_opens;
37921 - atomic_t num_closes;
37922 - atomic_t num_deletes;
37923 - atomic_t num_mkdirs;
37924 - atomic_t num_posixopens;
37925 - atomic_t num_posixmkdirs;
37926 - atomic_t num_rmdirs;
37927 - atomic_t num_renames;
37928 - atomic_t num_t2renames;
37929 - atomic_t num_ffirst;
37930 - atomic_t num_fnext;
37931 - atomic_t num_fclose;
37932 - atomic_t num_hardlinks;
37933 - atomic_t num_symlinks;
37934 - atomic_t num_locks;
37935 - atomic_t num_acl_get;
37936 - atomic_t num_acl_set;
37937 + atomic_unchecked_t num_smbs_sent;
37938 + atomic_unchecked_t num_writes;
37939 + atomic_unchecked_t num_reads;
37940 + atomic_unchecked_t num_flushes;
37941 + atomic_unchecked_t num_oplock_brks;
37942 + atomic_unchecked_t num_opens;
37943 + atomic_unchecked_t num_closes;
37944 + atomic_unchecked_t num_deletes;
37945 + atomic_unchecked_t num_mkdirs;
37946 + atomic_unchecked_t num_posixopens;
37947 + atomic_unchecked_t num_posixmkdirs;
37948 + atomic_unchecked_t num_rmdirs;
37949 + atomic_unchecked_t num_renames;
37950 + atomic_unchecked_t num_t2renames;
37951 + atomic_unchecked_t num_ffirst;
37952 + atomic_unchecked_t num_fnext;
37953 + atomic_unchecked_t num_fclose;
37954 + atomic_unchecked_t num_hardlinks;
37955 + atomic_unchecked_t num_symlinks;
37956 + atomic_unchecked_t num_locks;
37957 + atomic_unchecked_t num_acl_get;
37958 + atomic_unchecked_t num_acl_set;
37959 #ifdef CONFIG_CIFS_STATS2
37960 unsigned long long time_writes;
37961 unsigned long long time_reads;
37962 @@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
37963 }
37964
37965 #ifdef CONFIG_CIFS_STATS
37966 -#define cifs_stats_inc atomic_inc
37967 +#define cifs_stats_inc atomic_inc_unchecked
37968
37969 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
37970 unsigned int bytes)
37971 @@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
37972 /* Various Debug counters */
37973 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
37974 #ifdef CONFIG_CIFS_STATS2
37975 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
37976 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
37977 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
37978 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
37979 #endif
37980 GLOBAL_EXTERN atomic_t smBufAllocCount;
37981 GLOBAL_EXTERN atomic_t midCount;
37982 diff -urNp linux-3.0.4/fs/cifs/link.c linux-3.0.4/fs/cifs/link.c
37983 --- linux-3.0.4/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
37984 +++ linux-3.0.4/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
37985 @@ -587,7 +587,7 @@ symlink_exit:
37986
37987 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
37988 {
37989 - char *p = nd_get_link(nd);
37990 + const char *p = nd_get_link(nd);
37991 if (!IS_ERR(p))
37992 kfree(p);
37993 }
37994 diff -urNp linux-3.0.4/fs/cifs/misc.c linux-3.0.4/fs/cifs/misc.c
37995 --- linux-3.0.4/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
37996 +++ linux-3.0.4/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
37997 @@ -156,7 +156,7 @@ cifs_buf_get(void)
37998 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
37999 atomic_inc(&bufAllocCount);
38000 #ifdef CONFIG_CIFS_STATS2
38001 - atomic_inc(&totBufAllocCount);
38002 + atomic_inc_unchecked(&totBufAllocCount);
38003 #endif /* CONFIG_CIFS_STATS2 */
38004 }
38005
38006 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
38007 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
38008 atomic_inc(&smBufAllocCount);
38009 #ifdef CONFIG_CIFS_STATS2
38010 - atomic_inc(&totSmBufAllocCount);
38011 + atomic_inc_unchecked(&totSmBufAllocCount);
38012 #endif /* CONFIG_CIFS_STATS2 */
38013
38014 }
38015 diff -urNp linux-3.0.4/fs/coda/cache.c linux-3.0.4/fs/coda/cache.c
38016 --- linux-3.0.4/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
38017 +++ linux-3.0.4/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
38018 @@ -24,7 +24,7 @@
38019 #include "coda_linux.h"
38020 #include "coda_cache.h"
38021
38022 -static atomic_t permission_epoch = ATOMIC_INIT(0);
38023 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
38024
38025 /* replace or extend an acl cache hit */
38026 void coda_cache_enter(struct inode *inode, int mask)
38027 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
38028 struct coda_inode_info *cii = ITOC(inode);
38029
38030 spin_lock(&cii->c_lock);
38031 - cii->c_cached_epoch = atomic_read(&permission_epoch);
38032 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
38033 if (cii->c_uid != current_fsuid()) {
38034 cii->c_uid = current_fsuid();
38035 cii->c_cached_perm = mask;
38036 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
38037 {
38038 struct coda_inode_info *cii = ITOC(inode);
38039 spin_lock(&cii->c_lock);
38040 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
38041 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
38042 spin_unlock(&cii->c_lock);
38043 }
38044
38045 /* remove all acl caches */
38046 void coda_cache_clear_all(struct super_block *sb)
38047 {
38048 - atomic_inc(&permission_epoch);
38049 + atomic_inc_unchecked(&permission_epoch);
38050 }
38051
38052
38053 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
38054 spin_lock(&cii->c_lock);
38055 hit = (mask & cii->c_cached_perm) == mask &&
38056 cii->c_uid == current_fsuid() &&
38057 - cii->c_cached_epoch == atomic_read(&permission_epoch);
38058 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
38059 spin_unlock(&cii->c_lock);
38060
38061 return hit;
38062 diff -urNp linux-3.0.4/fs/compat_binfmt_elf.c linux-3.0.4/fs/compat_binfmt_elf.c
38063 --- linux-3.0.4/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
38064 +++ linux-3.0.4/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
38065 @@ -30,11 +30,13 @@
38066 #undef elf_phdr
38067 #undef elf_shdr
38068 #undef elf_note
38069 +#undef elf_dyn
38070 #undef elf_addr_t
38071 #define elfhdr elf32_hdr
38072 #define elf_phdr elf32_phdr
38073 #define elf_shdr elf32_shdr
38074 #define elf_note elf32_note
38075 +#define elf_dyn Elf32_Dyn
38076 #define elf_addr_t Elf32_Addr
38077
38078 /*
38079 diff -urNp linux-3.0.4/fs/compat.c linux-3.0.4/fs/compat.c
38080 --- linux-3.0.4/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
38081 +++ linux-3.0.4/fs/compat.c 2011-08-23 22:49:33.000000000 -0400
38082 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
38083 goto out;
38084
38085 ret = -EINVAL;
38086 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
38087 + if (nr_segs > UIO_MAXIOV)
38088 goto out;
38089 if (nr_segs > fast_segs) {
38090 ret = -ENOMEM;
38091 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
38092
38093 struct compat_readdir_callback {
38094 struct compat_old_linux_dirent __user *dirent;
38095 + struct file * file;
38096 int result;
38097 };
38098
38099 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
38100 buf->result = -EOVERFLOW;
38101 return -EOVERFLOW;
38102 }
38103 +
38104 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38105 + return 0;
38106 +
38107 buf->result++;
38108 dirent = buf->dirent;
38109 if (!access_ok(VERIFY_WRITE, dirent,
38110 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
38111
38112 buf.result = 0;
38113 buf.dirent = dirent;
38114 + buf.file = file;
38115
38116 error = vfs_readdir(file, compat_fillonedir, &buf);
38117 if (buf.result)
38118 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
38119 struct compat_getdents_callback {
38120 struct compat_linux_dirent __user *current_dir;
38121 struct compat_linux_dirent __user *previous;
38122 + struct file * file;
38123 int count;
38124 int error;
38125 };
38126 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
38127 buf->error = -EOVERFLOW;
38128 return -EOVERFLOW;
38129 }
38130 +
38131 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38132 + return 0;
38133 +
38134 dirent = buf->previous;
38135 if (dirent) {
38136 if (__put_user(offset, &dirent->d_off))
38137 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
38138 buf.previous = NULL;
38139 buf.count = count;
38140 buf.error = 0;
38141 + buf.file = file;
38142
38143 error = vfs_readdir(file, compat_filldir, &buf);
38144 if (error >= 0)
38145 @@ -1006,6 +1018,7 @@ out:
38146 struct compat_getdents_callback64 {
38147 struct linux_dirent64 __user *current_dir;
38148 struct linux_dirent64 __user *previous;
38149 + struct file * file;
38150 int count;
38151 int error;
38152 };
38153 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
38154 buf->error = -EINVAL; /* only used if we fail.. */
38155 if (reclen > buf->count)
38156 return -EINVAL;
38157 +
38158 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38159 + return 0;
38160 +
38161 dirent = buf->previous;
38162
38163 if (dirent) {
38164 @@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
38165 buf.previous = NULL;
38166 buf.count = count;
38167 buf.error = 0;
38168 + buf.file = file;
38169
38170 error = vfs_readdir(file, compat_filldir64, &buf);
38171 if (error >= 0)
38172 @@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
38173 struct fdtable *fdt;
38174 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
38175
38176 + pax_track_stack();
38177 +
38178 if (n < 0)
38179 goto out_nofds;
38180
38181 diff -urNp linux-3.0.4/fs/compat_ioctl.c linux-3.0.4/fs/compat_ioctl.c
38182 --- linux-3.0.4/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
38183 +++ linux-3.0.4/fs/compat_ioctl.c 2011-08-23 21:47:56.000000000 -0400
38184 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
38185
38186 err = get_user(palp, &up->palette);
38187 err |= get_user(length, &up->length);
38188 + if (err)
38189 + return -EFAULT;
38190
38191 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
38192 err = put_user(compat_ptr(palp), &up_native->palette);
38193 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
38194 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
38195 {
38196 unsigned int a, b;
38197 - a = *(unsigned int *)p;
38198 - b = *(unsigned int *)q;
38199 + a = *(const unsigned int *)p;
38200 + b = *(const unsigned int *)q;
38201 if (a > b)
38202 return 1;
38203 if (a < b)
38204 diff -urNp linux-3.0.4/fs/configfs/dir.c linux-3.0.4/fs/configfs/dir.c
38205 --- linux-3.0.4/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
38206 +++ linux-3.0.4/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
38207 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
38208 }
38209 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
38210 struct configfs_dirent *next;
38211 - const char * name;
38212 + const unsigned char * name;
38213 + char d_name[sizeof(next->s_dentry->d_iname)];
38214 int len;
38215 struct inode *inode = NULL;
38216
38217 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
38218 continue;
38219
38220 name = configfs_get_name(next);
38221 - len = strlen(name);
38222 + if (next->s_dentry && name == next->s_dentry->d_iname) {
38223 + len = next->s_dentry->d_name.len;
38224 + memcpy(d_name, name, len);
38225 + name = d_name;
38226 + } else
38227 + len = strlen(name);
38228
38229 /*
38230 * We'll have a dentry and an inode for
38231 diff -urNp linux-3.0.4/fs/dcache.c linux-3.0.4/fs/dcache.c
38232 --- linux-3.0.4/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
38233 +++ linux-3.0.4/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
38234 @@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
38235 mempages -= reserve;
38236
38237 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
38238 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
38239 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
38240
38241 dcache_init();
38242 inode_init();
38243 diff -urNp linux-3.0.4/fs/ecryptfs/inode.c linux-3.0.4/fs/ecryptfs/inode.c
38244 --- linux-3.0.4/fs/ecryptfs/inode.c 2011-08-23 21:44:40.000000000 -0400
38245 +++ linux-3.0.4/fs/ecryptfs/inode.c 2011-08-23 21:47:56.000000000 -0400
38246 @@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
38247 old_fs = get_fs();
38248 set_fs(get_ds());
38249 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
38250 - (char __user *)lower_buf,
38251 + (__force char __user *)lower_buf,
38252 lower_bufsiz);
38253 set_fs(old_fs);
38254 if (rc < 0)
38255 @@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
38256 }
38257 old_fs = get_fs();
38258 set_fs(get_ds());
38259 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
38260 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
38261 set_fs(old_fs);
38262 if (rc < 0) {
38263 kfree(buf);
38264 @@ -765,7 +765,7 @@ out:
38265 static void
38266 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
38267 {
38268 - char *buf = nd_get_link(nd);
38269 + const char *buf = nd_get_link(nd);
38270 if (!IS_ERR(buf)) {
38271 /* Free the char* */
38272 kfree(buf);
38273 diff -urNp linux-3.0.4/fs/ecryptfs/miscdev.c linux-3.0.4/fs/ecryptfs/miscdev.c
38274 --- linux-3.0.4/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
38275 +++ linux-3.0.4/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
38276 @@ -328,7 +328,7 @@ check_list:
38277 goto out_unlock_msg_ctx;
38278 i = 5;
38279 if (msg_ctx->msg) {
38280 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
38281 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
38282 goto out_unlock_msg_ctx;
38283 i += packet_length_size;
38284 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
38285 diff -urNp linux-3.0.4/fs/exec.c linux-3.0.4/fs/exec.c
38286 --- linux-3.0.4/fs/exec.c 2011-07-21 22:17:23.000000000 -0400
38287 +++ linux-3.0.4/fs/exec.c 2011-08-25 17:26:58.000000000 -0400
38288 @@ -55,12 +55,24 @@
38289 #include <linux/pipe_fs_i.h>
38290 #include <linux/oom.h>
38291 #include <linux/compat.h>
38292 +#include <linux/random.h>
38293 +#include <linux/seq_file.h>
38294 +
38295 +#ifdef CONFIG_PAX_REFCOUNT
38296 +#include <linux/kallsyms.h>
38297 +#include <linux/kdebug.h>
38298 +#endif
38299
38300 #include <asm/uaccess.h>
38301 #include <asm/mmu_context.h>
38302 #include <asm/tlb.h>
38303 #include "internal.h"
38304
38305 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
38306 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
38307 +EXPORT_SYMBOL(pax_set_initial_flags_func);
38308 +#endif
38309 +
38310 int core_uses_pid;
38311 char core_pattern[CORENAME_MAX_SIZE] = "core";
38312 unsigned int core_pipe_limit;
38313 @@ -70,7 +82,7 @@ struct core_name {
38314 char *corename;
38315 int used, size;
38316 };
38317 -static atomic_t call_count = ATOMIC_INIT(1);
38318 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
38319
38320 /* The maximal length of core_pattern is also specified in sysctl.c */
38321
38322 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
38323 char *tmp = getname(library);
38324 int error = PTR_ERR(tmp);
38325 static const struct open_flags uselib_flags = {
38326 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
38327 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
38328 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
38329 .intent = LOOKUP_OPEN
38330 };
38331 @@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
38332 int write)
38333 {
38334 struct page *page;
38335 - int ret;
38336
38337 -#ifdef CONFIG_STACK_GROWSUP
38338 - if (write) {
38339 - ret = expand_downwards(bprm->vma, pos);
38340 - if (ret < 0)
38341 - return NULL;
38342 - }
38343 -#endif
38344 - ret = get_user_pages(current, bprm->mm, pos,
38345 - 1, write, 1, &page, NULL);
38346 - if (ret <= 0)
38347 + if (0 > expand_downwards(bprm->vma, pos))
38348 + return NULL;
38349 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
38350 return NULL;
38351
38352 if (write) {
38353 @@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
38354 vma->vm_end = STACK_TOP_MAX;
38355 vma->vm_start = vma->vm_end - PAGE_SIZE;
38356 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
38357 +
38358 +#ifdef CONFIG_PAX_SEGMEXEC
38359 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
38360 +#endif
38361 +
38362 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
38363 INIT_LIST_HEAD(&vma->anon_vma_chain);
38364
38365 @@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
38366 mm->stack_vm = mm->total_vm = 1;
38367 up_write(&mm->mmap_sem);
38368 bprm->p = vma->vm_end - sizeof(void *);
38369 +
38370 +#ifdef CONFIG_PAX_RANDUSTACK
38371 + if (randomize_va_space)
38372 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
38373 +#endif
38374 +
38375 return 0;
38376 err:
38377 up_write(&mm->mmap_sem);
38378 @@ -403,19 +418,7 @@ err:
38379 return err;
38380 }
38381
38382 -struct user_arg_ptr {
38383 -#ifdef CONFIG_COMPAT
38384 - bool is_compat;
38385 -#endif
38386 - union {
38387 - const char __user *const __user *native;
38388 -#ifdef CONFIG_COMPAT
38389 - compat_uptr_t __user *compat;
38390 -#endif
38391 - } ptr;
38392 -};
38393 -
38394 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
38395 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
38396 {
38397 const char __user *native;
38398
38399 @@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
38400 int r;
38401 mm_segment_t oldfs = get_fs();
38402 struct user_arg_ptr argv = {
38403 - .ptr.native = (const char __user *const __user *)__argv,
38404 + .ptr.native = (__force const char __user *const __user *)__argv,
38405 };
38406
38407 set_fs(KERNEL_DS);
38408 @@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
38409 unsigned long new_end = old_end - shift;
38410 struct mmu_gather tlb;
38411
38412 - BUG_ON(new_start > new_end);
38413 + if (new_start >= new_end || new_start < mmap_min_addr)
38414 + return -ENOMEM;
38415
38416 /*
38417 * ensure there are no vmas between where we want to go
38418 @@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
38419 if (vma != find_vma(mm, new_start))
38420 return -EFAULT;
38421
38422 +#ifdef CONFIG_PAX_SEGMEXEC
38423 + BUG_ON(pax_find_mirror_vma(vma));
38424 +#endif
38425 +
38426 /*
38427 * cover the whole range: [new_start, old_end)
38428 */
38429 @@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
38430 stack_top = arch_align_stack(stack_top);
38431 stack_top = PAGE_ALIGN(stack_top);
38432
38433 - if (unlikely(stack_top < mmap_min_addr) ||
38434 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
38435 - return -ENOMEM;
38436 -
38437 stack_shift = vma->vm_end - stack_top;
38438
38439 bprm->p -= stack_shift;
38440 @@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
38441 bprm->exec -= stack_shift;
38442
38443 down_write(&mm->mmap_sem);
38444 +
38445 + /* Move stack pages down in memory. */
38446 + if (stack_shift) {
38447 + ret = shift_arg_pages(vma, stack_shift);
38448 + if (ret)
38449 + goto out_unlock;
38450 + }
38451 +
38452 vm_flags = VM_STACK_FLAGS;
38453
38454 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38455 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38456 + vm_flags &= ~VM_EXEC;
38457 +
38458 +#ifdef CONFIG_PAX_MPROTECT
38459 + if (mm->pax_flags & MF_PAX_MPROTECT)
38460 + vm_flags &= ~VM_MAYEXEC;
38461 +#endif
38462 +
38463 + }
38464 +#endif
38465 +
38466 /*
38467 * Adjust stack execute permissions; explicitly enable for
38468 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
38469 @@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
38470 goto out_unlock;
38471 BUG_ON(prev != vma);
38472
38473 - /* Move stack pages down in memory. */
38474 - if (stack_shift) {
38475 - ret = shift_arg_pages(vma, stack_shift);
38476 - if (ret)
38477 - goto out_unlock;
38478 - }
38479 -
38480 /* mprotect_fixup is overkill to remove the temporary stack flags */
38481 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
38482
38483 @@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
38484 struct file *file;
38485 int err;
38486 static const struct open_flags open_exec_flags = {
38487 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
38488 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
38489 .acc_mode = MAY_EXEC | MAY_OPEN,
38490 .intent = LOOKUP_OPEN
38491 };
38492 @@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
38493 old_fs = get_fs();
38494 set_fs(get_ds());
38495 /* The cast to a user pointer is valid due to the set_fs() */
38496 - result = vfs_read(file, (void __user *)addr, count, &pos);
38497 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
38498 set_fs(old_fs);
38499 return result;
38500 }
38501 @@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
38502 }
38503 rcu_read_unlock();
38504
38505 - if (p->fs->users > n_fs) {
38506 + if (atomic_read(&p->fs->users) > n_fs) {
38507 bprm->unsafe |= LSM_UNSAFE_SHARE;
38508 } else {
38509 res = -EAGAIN;
38510 @@ -1428,11 +1445,35 @@ static int do_execve_common(const char *
38511 struct user_arg_ptr envp,
38512 struct pt_regs *regs)
38513 {
38514 +#ifdef CONFIG_GRKERNSEC
38515 + struct file *old_exec_file;
38516 + struct acl_subject_label *old_acl;
38517 + struct rlimit old_rlim[RLIM_NLIMITS];
38518 +#endif
38519 struct linux_binprm *bprm;
38520 struct file *file;
38521 struct files_struct *displaced;
38522 bool clear_in_exec;
38523 int retval;
38524 + const struct cred *cred = current_cred();
38525 +
38526 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
38527 +
38528 + /*
38529 + * We move the actual failure in case of RLIMIT_NPROC excess from
38530 + * set*uid() to execve() because too many poorly written programs
38531 + * don't check setuid() return code. Here we additionally recheck
38532 + * whether NPROC limit is still exceeded.
38533 + */
38534 + if ((current->flags & PF_NPROC_EXCEEDED) &&
38535 + atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
38536 + retval = -EAGAIN;
38537 + goto out_ret;
38538 + }
38539 +
38540 + /* We're below the limit (still or again), so we don't want to make
38541 + * further execve() calls fail. */
38542 + current->flags &= ~PF_NPROC_EXCEEDED;
38543
38544 retval = unshare_files(&displaced);
38545 if (retval)
38546 @@ -1464,6 +1505,16 @@ static int do_execve_common(const char *
38547 bprm->filename = filename;
38548 bprm->interp = filename;
38549
38550 + if (gr_process_user_ban()) {
38551 + retval = -EPERM;
38552 + goto out_file;
38553 + }
38554 +
38555 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
38556 + retval = -EACCES;
38557 + goto out_file;
38558 + }
38559 +
38560 retval = bprm_mm_init(bprm);
38561 if (retval)
38562 goto out_file;
38563 @@ -1493,9 +1544,40 @@ static int do_execve_common(const char *
38564 if (retval < 0)
38565 goto out;
38566
38567 + if (!gr_tpe_allow(file)) {
38568 + retval = -EACCES;
38569 + goto out;
38570 + }
38571 +
38572 + if (gr_check_crash_exec(file)) {
38573 + retval = -EACCES;
38574 + goto out;
38575 + }
38576 +
38577 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
38578 +
38579 + gr_handle_exec_args(bprm, argv);
38580 +
38581 +#ifdef CONFIG_GRKERNSEC
38582 + old_acl = current->acl;
38583 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
38584 + old_exec_file = current->exec_file;
38585 + get_file(file);
38586 + current->exec_file = file;
38587 +#endif
38588 +
38589 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
38590 + bprm->unsafe & LSM_UNSAFE_SHARE);
38591 + if (retval < 0)
38592 + goto out_fail;
38593 +
38594 retval = search_binary_handler(bprm,regs);
38595 if (retval < 0)
38596 - goto out;
38597 + goto out_fail;
38598 +#ifdef CONFIG_GRKERNSEC
38599 + if (old_exec_file)
38600 + fput(old_exec_file);
38601 +#endif
38602
38603 /* execve succeeded */
38604 current->fs->in_exec = 0;
38605 @@ -1506,6 +1588,14 @@ static int do_execve_common(const char *
38606 put_files_struct(displaced);
38607 return retval;
38608
38609 +out_fail:
38610 +#ifdef CONFIG_GRKERNSEC
38611 + current->acl = old_acl;
38612 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38613 + fput(current->exec_file);
38614 + current->exec_file = old_exec_file;
38615 +#endif
38616 +
38617 out:
38618 if (bprm->mm) {
38619 acct_arg_size(bprm, 0);
38620 @@ -1579,7 +1669,7 @@ static int expand_corename(struct core_n
38621 {
38622 char *old_corename = cn->corename;
38623
38624 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
38625 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
38626 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
38627
38628 if (!cn->corename) {
38629 @@ -1667,7 +1757,7 @@ static int format_corename(struct core_n
38630 int pid_in_pattern = 0;
38631 int err = 0;
38632
38633 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
38634 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
38635 cn->corename = kmalloc(cn->size, GFP_KERNEL);
38636 cn->used = 0;
38637
38638 @@ -1758,6 +1848,219 @@ out:
38639 return ispipe;
38640 }
38641
38642 +int pax_check_flags(unsigned long *flags)
38643 +{
38644 + int retval = 0;
38645 +
38646 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
38647 + if (*flags & MF_PAX_SEGMEXEC)
38648 + {
38649 + *flags &= ~MF_PAX_SEGMEXEC;
38650 + retval = -EINVAL;
38651 + }
38652 +#endif
38653 +
38654 + if ((*flags & MF_PAX_PAGEEXEC)
38655 +
38656 +#ifdef CONFIG_PAX_PAGEEXEC
38657 + && (*flags & MF_PAX_SEGMEXEC)
38658 +#endif
38659 +
38660 + )
38661 + {
38662 + *flags &= ~MF_PAX_PAGEEXEC;
38663 + retval = -EINVAL;
38664 + }
38665 +
38666 + if ((*flags & MF_PAX_MPROTECT)
38667 +
38668 +#ifdef CONFIG_PAX_MPROTECT
38669 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38670 +#endif
38671 +
38672 + )
38673 + {
38674 + *flags &= ~MF_PAX_MPROTECT;
38675 + retval = -EINVAL;
38676 + }
38677 +
38678 + if ((*flags & MF_PAX_EMUTRAMP)
38679 +
38680 +#ifdef CONFIG_PAX_EMUTRAMP
38681 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38682 +#endif
38683 +
38684 + )
38685 + {
38686 + *flags &= ~MF_PAX_EMUTRAMP;
38687 + retval = -EINVAL;
38688 + }
38689 +
38690 + return retval;
38691 +}
38692 +
38693 +EXPORT_SYMBOL(pax_check_flags);
38694 +
38695 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38696 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
38697 +{
38698 + struct task_struct *tsk = current;
38699 + struct mm_struct *mm = current->mm;
38700 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
38701 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
38702 + char *path_exec = NULL;
38703 + char *path_fault = NULL;
38704 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
38705 +
38706 + if (buffer_exec && buffer_fault) {
38707 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
38708 +
38709 + down_read(&mm->mmap_sem);
38710 + vma = mm->mmap;
38711 + while (vma && (!vma_exec || !vma_fault)) {
38712 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
38713 + vma_exec = vma;
38714 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
38715 + vma_fault = vma;
38716 + vma = vma->vm_next;
38717 + }
38718 + if (vma_exec) {
38719 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
38720 + if (IS_ERR(path_exec))
38721 + path_exec = "<path too long>";
38722 + else {
38723 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
38724 + if (path_exec) {
38725 + *path_exec = 0;
38726 + path_exec = buffer_exec;
38727 + } else
38728 + path_exec = "<path too long>";
38729 + }
38730 + }
38731 + if (vma_fault) {
38732 + start = vma_fault->vm_start;
38733 + end = vma_fault->vm_end;
38734 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
38735 + if (vma_fault->vm_file) {
38736 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
38737 + if (IS_ERR(path_fault))
38738 + path_fault = "<path too long>";
38739 + else {
38740 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
38741 + if (path_fault) {
38742 + *path_fault = 0;
38743 + path_fault = buffer_fault;
38744 + } else
38745 + path_fault = "<path too long>";
38746 + }
38747 + } else
38748 + path_fault = "<anonymous mapping>";
38749 + }
38750 + up_read(&mm->mmap_sem);
38751 + }
38752 + if (tsk->signal->curr_ip)
38753 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
38754 + else
38755 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
38756 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
38757 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
38758 + task_uid(tsk), task_euid(tsk), pc, sp);
38759 + free_page((unsigned long)buffer_exec);
38760 + free_page((unsigned long)buffer_fault);
38761 + pax_report_insns(pc, sp);
38762 + do_coredump(SIGKILL, SIGKILL, regs);
38763 +}
38764 +#endif
38765 +
38766 +#ifdef CONFIG_PAX_REFCOUNT
38767 +void pax_report_refcount_overflow(struct pt_regs *regs)
38768 +{
38769 + if (current->signal->curr_ip)
38770 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38771 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
38772 + else
38773 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38774 + current->comm, task_pid_nr(current), current_uid(), current_euid());
38775 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
38776 + show_regs(regs);
38777 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
38778 +}
38779 +#endif
38780 +
38781 +#ifdef CONFIG_PAX_USERCOPY
38782 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
38783 +int object_is_on_stack(const void *obj, unsigned long len)
38784 +{
38785 + const void * const stack = task_stack_page(current);
38786 + const void * const stackend = stack + THREAD_SIZE;
38787 +
38788 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38789 + const void *frame = NULL;
38790 + const void *oldframe;
38791 +#endif
38792 +
38793 + if (obj + len < obj)
38794 + return -1;
38795 +
38796 + if (obj + len <= stack || stackend <= obj)
38797 + return 0;
38798 +
38799 + if (obj < stack || stackend < obj + len)
38800 + return -1;
38801 +
38802 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38803 + oldframe = __builtin_frame_address(1);
38804 + if (oldframe)
38805 + frame = __builtin_frame_address(2);
38806 + /*
38807 + low ----------------------------------------------> high
38808 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
38809 + ^----------------^
38810 + allow copies only within here
38811 + */
38812 + while (stack <= frame && frame < stackend) {
38813 + /* if obj + len extends past the last frame, this
38814 + check won't pass and the next frame will be 0,
38815 + causing us to bail out and correctly report
38816 + the copy as invalid
38817 + */
38818 + if (obj + len <= frame)
38819 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38820 + oldframe = frame;
38821 + frame = *(const void * const *)frame;
38822 + }
38823 + return -1;
38824 +#else
38825 + return 1;
38826 +#endif
38827 +}
38828 +
38829 +
38830 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38831 +{
38832 + if (current->signal->curr_ip)
38833 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38834 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38835 + else
38836 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38837 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38838 + dump_stack();
38839 + gr_handle_kernel_exploit();
38840 + do_group_exit(SIGKILL);
38841 +}
38842 +#endif
38843 +
38844 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38845 +void pax_track_stack(void)
38846 +{
38847 + unsigned long sp = (unsigned long)&sp;
38848 + if (sp < current_thread_info()->lowest_stack &&
38849 + sp > (unsigned long)task_stack_page(current))
38850 + current_thread_info()->lowest_stack = sp;
38851 +}
38852 +EXPORT_SYMBOL(pax_track_stack);
38853 +#endif
38854 +
38855 static int zap_process(struct task_struct *start, int exit_code)
38856 {
38857 struct task_struct *t;
38858 @@ -1969,17 +2272,17 @@ static void wait_for_dump_helpers(struct
38859 pipe = file->f_path.dentry->d_inode->i_pipe;
38860
38861 pipe_lock(pipe);
38862 - pipe->readers++;
38863 - pipe->writers--;
38864 + atomic_inc(&pipe->readers);
38865 + atomic_dec(&pipe->writers);
38866
38867 - while ((pipe->readers > 1) && (!signal_pending(current))) {
38868 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38869 wake_up_interruptible_sync(&pipe->wait);
38870 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38871 pipe_wait(pipe);
38872 }
38873
38874 - pipe->readers--;
38875 - pipe->writers++;
38876 + atomic_dec(&pipe->readers);
38877 + atomic_inc(&pipe->writers);
38878 pipe_unlock(pipe);
38879
38880 }
38881 @@ -2040,7 +2343,7 @@ void do_coredump(long signr, int exit_co
38882 int retval = 0;
38883 int flag = 0;
38884 int ispipe;
38885 - static atomic_t core_dump_count = ATOMIC_INIT(0);
38886 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38887 struct coredump_params cprm = {
38888 .signr = signr,
38889 .regs = regs,
38890 @@ -2055,6 +2358,9 @@ void do_coredump(long signr, int exit_co
38891
38892 audit_core_dumps(signr);
38893
38894 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38895 + gr_handle_brute_attach(current, cprm.mm_flags);
38896 +
38897 binfmt = mm->binfmt;
38898 if (!binfmt || !binfmt->core_dump)
38899 goto fail;
38900 @@ -2095,6 +2401,8 @@ void do_coredump(long signr, int exit_co
38901 goto fail_corename;
38902 }
38903
38904 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38905 +
38906 if (ispipe) {
38907 int dump_count;
38908 char **helper_argv;
38909 @@ -2122,7 +2430,7 @@ void do_coredump(long signr, int exit_co
38910 }
38911 cprm.limit = RLIM_INFINITY;
38912
38913 - dump_count = atomic_inc_return(&core_dump_count);
38914 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
38915 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38916 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38917 task_tgid_vnr(current), current->comm);
38918 @@ -2192,7 +2500,7 @@ close_fail:
38919 filp_close(cprm.file, NULL);
38920 fail_dropcount:
38921 if (ispipe)
38922 - atomic_dec(&core_dump_count);
38923 + atomic_dec_unchecked(&core_dump_count);
38924 fail_unlock:
38925 kfree(cn.corename);
38926 fail_corename:
38927 diff -urNp linux-3.0.4/fs/ext2/balloc.c linux-3.0.4/fs/ext2/balloc.c
38928 --- linux-3.0.4/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
38929 +++ linux-3.0.4/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
38930 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38931
38932 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38933 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38934 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38935 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38936 sbi->s_resuid != current_fsuid() &&
38937 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38938 return 0;
38939 diff -urNp linux-3.0.4/fs/ext3/balloc.c linux-3.0.4/fs/ext3/balloc.c
38940 --- linux-3.0.4/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
38941 +++ linux-3.0.4/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
38942 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
38943
38944 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38945 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38946 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38947 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38948 sbi->s_resuid != current_fsuid() &&
38949 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38950 return 0;
38951 diff -urNp linux-3.0.4/fs/ext4/balloc.c linux-3.0.4/fs/ext4/balloc.c
38952 --- linux-3.0.4/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
38953 +++ linux-3.0.4/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
38954 @@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
38955 /* Hm, nope. Are (enough) root reserved blocks available? */
38956 if (sbi->s_resuid == current_fsuid() ||
38957 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38958 - capable(CAP_SYS_RESOURCE) ||
38959 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
38960 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
38961 + capable_nolog(CAP_SYS_RESOURCE)) {
38962
38963 if (free_blocks >= (nblocks + dirty_blocks))
38964 return 1;
38965 diff -urNp linux-3.0.4/fs/ext4/ext4.h linux-3.0.4/fs/ext4/ext4.h
38966 --- linux-3.0.4/fs/ext4/ext4.h 2011-08-23 21:44:40.000000000 -0400
38967 +++ linux-3.0.4/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
38968 @@ -1177,19 +1177,19 @@ struct ext4_sb_info {
38969 unsigned long s_mb_last_start;
38970
38971 /* stats for buddy allocator */
38972 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
38973 - atomic_t s_bal_success; /* we found long enough chunks */
38974 - atomic_t s_bal_allocated; /* in blocks */
38975 - atomic_t s_bal_ex_scanned; /* total extents scanned */
38976 - atomic_t s_bal_goals; /* goal hits */
38977 - atomic_t s_bal_breaks; /* too long searches */
38978 - atomic_t s_bal_2orders; /* 2^order hits */
38979 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
38980 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
38981 + atomic_unchecked_t s_bal_allocated; /* in blocks */
38982 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
38983 + atomic_unchecked_t s_bal_goals; /* goal hits */
38984 + atomic_unchecked_t s_bal_breaks; /* too long searches */
38985 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
38986 spinlock_t s_bal_lock;
38987 unsigned long s_mb_buddies_generated;
38988 unsigned long long s_mb_generation_time;
38989 - atomic_t s_mb_lost_chunks;
38990 - atomic_t s_mb_preallocated;
38991 - atomic_t s_mb_discarded;
38992 + atomic_unchecked_t s_mb_lost_chunks;
38993 + atomic_unchecked_t s_mb_preallocated;
38994 + atomic_unchecked_t s_mb_discarded;
38995 atomic_t s_lock_busy;
38996
38997 /* locality groups */
38998 diff -urNp linux-3.0.4/fs/ext4/mballoc.c linux-3.0.4/fs/ext4/mballoc.c
38999 --- linux-3.0.4/fs/ext4/mballoc.c 2011-08-23 21:44:40.000000000 -0400
39000 +++ linux-3.0.4/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
39001 @@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
39002 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
39003
39004 if (EXT4_SB(sb)->s_mb_stats)
39005 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
39006 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
39007
39008 break;
39009 }
39010 @@ -2087,7 +2087,7 @@ repeat:
39011 ac->ac_status = AC_STATUS_CONTINUE;
39012 ac->ac_flags |= EXT4_MB_HINT_FIRST;
39013 cr = 3;
39014 - atomic_inc(&sbi->s_mb_lost_chunks);
39015 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
39016 goto repeat;
39017 }
39018 }
39019 @@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
39020 ext4_grpblk_t counters[16];
39021 } sg;
39022
39023 + pax_track_stack();
39024 +
39025 group--;
39026 if (group == 0)
39027 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
39028 @@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
39029 if (sbi->s_mb_stats) {
39030 printk(KERN_INFO
39031 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
39032 - atomic_read(&sbi->s_bal_allocated),
39033 - atomic_read(&sbi->s_bal_reqs),
39034 - atomic_read(&sbi->s_bal_success));
39035 + atomic_read_unchecked(&sbi->s_bal_allocated),
39036 + atomic_read_unchecked(&sbi->s_bal_reqs),
39037 + atomic_read_unchecked(&sbi->s_bal_success));
39038 printk(KERN_INFO
39039 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
39040 "%u 2^N hits, %u breaks, %u lost\n",
39041 - atomic_read(&sbi->s_bal_ex_scanned),
39042 - atomic_read(&sbi->s_bal_goals),
39043 - atomic_read(&sbi->s_bal_2orders),
39044 - atomic_read(&sbi->s_bal_breaks),
39045 - atomic_read(&sbi->s_mb_lost_chunks));
39046 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
39047 + atomic_read_unchecked(&sbi->s_bal_goals),
39048 + atomic_read_unchecked(&sbi->s_bal_2orders),
39049 + atomic_read_unchecked(&sbi->s_bal_breaks),
39050 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
39051 printk(KERN_INFO
39052 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
39053 sbi->s_mb_buddies_generated++,
39054 sbi->s_mb_generation_time);
39055 printk(KERN_INFO
39056 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
39057 - atomic_read(&sbi->s_mb_preallocated),
39058 - atomic_read(&sbi->s_mb_discarded));
39059 + atomic_read_unchecked(&sbi->s_mb_preallocated),
39060 + atomic_read_unchecked(&sbi->s_mb_discarded));
39061 }
39062
39063 free_percpu(sbi->s_locality_groups);
39064 @@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
39065 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
39066
39067 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
39068 - atomic_inc(&sbi->s_bal_reqs);
39069 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39070 + atomic_inc_unchecked(&sbi->s_bal_reqs);
39071 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39072 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
39073 - atomic_inc(&sbi->s_bal_success);
39074 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
39075 + atomic_inc_unchecked(&sbi->s_bal_success);
39076 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
39077 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
39078 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
39079 - atomic_inc(&sbi->s_bal_goals);
39080 + atomic_inc_unchecked(&sbi->s_bal_goals);
39081 if (ac->ac_found > sbi->s_mb_max_to_scan)
39082 - atomic_inc(&sbi->s_bal_breaks);
39083 + atomic_inc_unchecked(&sbi->s_bal_breaks);
39084 }
39085
39086 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
39087 @@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
39088 trace_ext4_mb_new_inode_pa(ac, pa);
39089
39090 ext4_mb_use_inode_pa(ac, pa);
39091 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39092 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39093
39094 ei = EXT4_I(ac->ac_inode);
39095 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39096 @@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
39097 trace_ext4_mb_new_group_pa(ac, pa);
39098
39099 ext4_mb_use_group_pa(ac, pa);
39100 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39101 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39102
39103 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39104 lg = ac->ac_lg;
39105 @@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
39106 * from the bitmap and continue.
39107 */
39108 }
39109 - atomic_add(free, &sbi->s_mb_discarded);
39110 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
39111
39112 return err;
39113 }
39114 @@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
39115 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
39116 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
39117 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
39118 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39119 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39120 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
39121
39122 return 0;
39123 diff -urNp linux-3.0.4/fs/fcntl.c linux-3.0.4/fs/fcntl.c
39124 --- linux-3.0.4/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
39125 +++ linux-3.0.4/fs/fcntl.c 2011-08-23 21:48:14.000000000 -0400
39126 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
39127 if (err)
39128 return err;
39129
39130 + if (gr_handle_chroot_fowner(pid, type))
39131 + return -ENOENT;
39132 + if (gr_check_protected_task_fowner(pid, type))
39133 + return -EACCES;
39134 +
39135 f_modown(filp, pid, type, force);
39136 return 0;
39137 }
39138 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
39139 switch (cmd) {
39140 case F_DUPFD:
39141 case F_DUPFD_CLOEXEC:
39142 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
39143 if (arg >= rlimit(RLIMIT_NOFILE))
39144 break;
39145 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
39146 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
39147 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
39148 * is defined as O_NONBLOCK on some platforms and not on others.
39149 */
39150 - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
39151 + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
39152 O_RDONLY | O_WRONLY | O_RDWR |
39153 O_CREAT | O_EXCL | O_NOCTTY |
39154 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
39155 __O_SYNC | O_DSYNC | FASYNC |
39156 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
39157 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
39158 - __FMODE_EXEC | O_PATH
39159 + __FMODE_EXEC | O_PATH | FMODE_GREXEC
39160 ));
39161
39162 fasync_cache = kmem_cache_create("fasync_cache",
39163 diff -urNp linux-3.0.4/fs/fifo.c linux-3.0.4/fs/fifo.c
39164 --- linux-3.0.4/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
39165 +++ linux-3.0.4/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
39166 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
39167 */
39168 filp->f_op = &read_pipefifo_fops;
39169 pipe->r_counter++;
39170 - if (pipe->readers++ == 0)
39171 + if (atomic_inc_return(&pipe->readers) == 1)
39172 wake_up_partner(inode);
39173
39174 - if (!pipe->writers) {
39175 + if (!atomic_read(&pipe->writers)) {
39176 if ((filp->f_flags & O_NONBLOCK)) {
39177 /* suppress POLLHUP until we have
39178 * seen a writer */
39179 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
39180 * errno=ENXIO when there is no process reading the FIFO.
39181 */
39182 ret = -ENXIO;
39183 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
39184 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
39185 goto err;
39186
39187 filp->f_op = &write_pipefifo_fops;
39188 pipe->w_counter++;
39189 - if (!pipe->writers++)
39190 + if (atomic_inc_return(&pipe->writers) == 1)
39191 wake_up_partner(inode);
39192
39193 - if (!pipe->readers) {
39194 + if (!atomic_read(&pipe->readers)) {
39195 wait_for_partner(inode, &pipe->r_counter);
39196 if (signal_pending(current))
39197 goto err_wr;
39198 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
39199 */
39200 filp->f_op = &rdwr_pipefifo_fops;
39201
39202 - pipe->readers++;
39203 - pipe->writers++;
39204 + atomic_inc(&pipe->readers);
39205 + atomic_inc(&pipe->writers);
39206 pipe->r_counter++;
39207 pipe->w_counter++;
39208 - if (pipe->readers == 1 || pipe->writers == 1)
39209 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
39210 wake_up_partner(inode);
39211 break;
39212
39213 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
39214 return 0;
39215
39216 err_rd:
39217 - if (!--pipe->readers)
39218 + if (atomic_dec_and_test(&pipe->readers))
39219 wake_up_interruptible(&pipe->wait);
39220 ret = -ERESTARTSYS;
39221 goto err;
39222
39223 err_wr:
39224 - if (!--pipe->writers)
39225 + if (atomic_dec_and_test(&pipe->writers))
39226 wake_up_interruptible(&pipe->wait);
39227 ret = -ERESTARTSYS;
39228 goto err;
39229
39230 err:
39231 - if (!pipe->readers && !pipe->writers)
39232 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
39233 free_pipe_info(inode);
39234
39235 err_nocleanup:
39236 diff -urNp linux-3.0.4/fs/file.c linux-3.0.4/fs/file.c
39237 --- linux-3.0.4/fs/file.c 2011-07-21 22:17:23.000000000 -0400
39238 +++ linux-3.0.4/fs/file.c 2011-08-23 21:48:14.000000000 -0400
39239 @@ -15,6 +15,7 @@
39240 #include <linux/slab.h>
39241 #include <linux/vmalloc.h>
39242 #include <linux/file.h>
39243 +#include <linux/security.h>
39244 #include <linux/fdtable.h>
39245 #include <linux/bitops.h>
39246 #include <linux/interrupt.h>
39247 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
39248 * N.B. For clone tasks sharing a files structure, this test
39249 * will limit the total number of files that can be opened.
39250 */
39251 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
39252 if (nr >= rlimit(RLIMIT_NOFILE))
39253 return -EMFILE;
39254
39255 diff -urNp linux-3.0.4/fs/filesystems.c linux-3.0.4/fs/filesystems.c
39256 --- linux-3.0.4/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
39257 +++ linux-3.0.4/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
39258 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
39259 int len = dot ? dot - name : strlen(name);
39260
39261 fs = __get_fs_type(name, len);
39262 +
39263 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
39264 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
39265 +#else
39266 if (!fs && (request_module("%.*s", len, name) == 0))
39267 +#endif
39268 fs = __get_fs_type(name, len);
39269
39270 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
39271 diff -urNp linux-3.0.4/fs/fscache/cookie.c linux-3.0.4/fs/fscache/cookie.c
39272 --- linux-3.0.4/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
39273 +++ linux-3.0.4/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
39274 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
39275 parent ? (char *) parent->def->name : "<no-parent>",
39276 def->name, netfs_data);
39277
39278 - fscache_stat(&fscache_n_acquires);
39279 + fscache_stat_unchecked(&fscache_n_acquires);
39280
39281 /* if there's no parent cookie, then we don't create one here either */
39282 if (!parent) {
39283 - fscache_stat(&fscache_n_acquires_null);
39284 + fscache_stat_unchecked(&fscache_n_acquires_null);
39285 _leave(" [no parent]");
39286 return NULL;
39287 }
39288 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
39289 /* allocate and initialise a cookie */
39290 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
39291 if (!cookie) {
39292 - fscache_stat(&fscache_n_acquires_oom);
39293 + fscache_stat_unchecked(&fscache_n_acquires_oom);
39294 _leave(" [ENOMEM]");
39295 return NULL;
39296 }
39297 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
39298
39299 switch (cookie->def->type) {
39300 case FSCACHE_COOKIE_TYPE_INDEX:
39301 - fscache_stat(&fscache_n_cookie_index);
39302 + fscache_stat_unchecked(&fscache_n_cookie_index);
39303 break;
39304 case FSCACHE_COOKIE_TYPE_DATAFILE:
39305 - fscache_stat(&fscache_n_cookie_data);
39306 + fscache_stat_unchecked(&fscache_n_cookie_data);
39307 break;
39308 default:
39309 - fscache_stat(&fscache_n_cookie_special);
39310 + fscache_stat_unchecked(&fscache_n_cookie_special);
39311 break;
39312 }
39313
39314 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
39315 if (fscache_acquire_non_index_cookie(cookie) < 0) {
39316 atomic_dec(&parent->n_children);
39317 __fscache_cookie_put(cookie);
39318 - fscache_stat(&fscache_n_acquires_nobufs);
39319 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
39320 _leave(" = NULL");
39321 return NULL;
39322 }
39323 }
39324
39325 - fscache_stat(&fscache_n_acquires_ok);
39326 + fscache_stat_unchecked(&fscache_n_acquires_ok);
39327 _leave(" = %p", cookie);
39328 return cookie;
39329 }
39330 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
39331 cache = fscache_select_cache_for_object(cookie->parent);
39332 if (!cache) {
39333 up_read(&fscache_addremove_sem);
39334 - fscache_stat(&fscache_n_acquires_no_cache);
39335 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
39336 _leave(" = -ENOMEDIUM [no cache]");
39337 return -ENOMEDIUM;
39338 }
39339 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
39340 object = cache->ops->alloc_object(cache, cookie);
39341 fscache_stat_d(&fscache_n_cop_alloc_object);
39342 if (IS_ERR(object)) {
39343 - fscache_stat(&fscache_n_object_no_alloc);
39344 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
39345 ret = PTR_ERR(object);
39346 goto error;
39347 }
39348
39349 - fscache_stat(&fscache_n_object_alloc);
39350 + fscache_stat_unchecked(&fscache_n_object_alloc);
39351
39352 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
39353
39354 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
39355 struct fscache_object *object;
39356 struct hlist_node *_p;
39357
39358 - fscache_stat(&fscache_n_updates);
39359 + fscache_stat_unchecked(&fscache_n_updates);
39360
39361 if (!cookie) {
39362 - fscache_stat(&fscache_n_updates_null);
39363 + fscache_stat_unchecked(&fscache_n_updates_null);
39364 _leave(" [no cookie]");
39365 return;
39366 }
39367 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
39368 struct fscache_object *object;
39369 unsigned long event;
39370
39371 - fscache_stat(&fscache_n_relinquishes);
39372 + fscache_stat_unchecked(&fscache_n_relinquishes);
39373 if (retire)
39374 - fscache_stat(&fscache_n_relinquishes_retire);
39375 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
39376
39377 if (!cookie) {
39378 - fscache_stat(&fscache_n_relinquishes_null);
39379 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
39380 _leave(" [no cookie]");
39381 return;
39382 }
39383 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
39384
39385 /* wait for the cookie to finish being instantiated (or to fail) */
39386 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
39387 - fscache_stat(&fscache_n_relinquishes_waitcrt);
39388 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
39389 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
39390 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
39391 }
39392 diff -urNp linux-3.0.4/fs/fscache/internal.h linux-3.0.4/fs/fscache/internal.h
39393 --- linux-3.0.4/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
39394 +++ linux-3.0.4/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
39395 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
39396 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
39397 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
39398
39399 -extern atomic_t fscache_n_op_pend;
39400 -extern atomic_t fscache_n_op_run;
39401 -extern atomic_t fscache_n_op_enqueue;
39402 -extern atomic_t fscache_n_op_deferred_release;
39403 -extern atomic_t fscache_n_op_release;
39404 -extern atomic_t fscache_n_op_gc;
39405 -extern atomic_t fscache_n_op_cancelled;
39406 -extern atomic_t fscache_n_op_rejected;
39407 -
39408 -extern atomic_t fscache_n_attr_changed;
39409 -extern atomic_t fscache_n_attr_changed_ok;
39410 -extern atomic_t fscache_n_attr_changed_nobufs;
39411 -extern atomic_t fscache_n_attr_changed_nomem;
39412 -extern atomic_t fscache_n_attr_changed_calls;
39413 -
39414 -extern atomic_t fscache_n_allocs;
39415 -extern atomic_t fscache_n_allocs_ok;
39416 -extern atomic_t fscache_n_allocs_wait;
39417 -extern atomic_t fscache_n_allocs_nobufs;
39418 -extern atomic_t fscache_n_allocs_intr;
39419 -extern atomic_t fscache_n_allocs_object_dead;
39420 -extern atomic_t fscache_n_alloc_ops;
39421 -extern atomic_t fscache_n_alloc_op_waits;
39422 -
39423 -extern atomic_t fscache_n_retrievals;
39424 -extern atomic_t fscache_n_retrievals_ok;
39425 -extern atomic_t fscache_n_retrievals_wait;
39426 -extern atomic_t fscache_n_retrievals_nodata;
39427 -extern atomic_t fscache_n_retrievals_nobufs;
39428 -extern atomic_t fscache_n_retrievals_intr;
39429 -extern atomic_t fscache_n_retrievals_nomem;
39430 -extern atomic_t fscache_n_retrievals_object_dead;
39431 -extern atomic_t fscache_n_retrieval_ops;
39432 -extern atomic_t fscache_n_retrieval_op_waits;
39433 -
39434 -extern atomic_t fscache_n_stores;
39435 -extern atomic_t fscache_n_stores_ok;
39436 -extern atomic_t fscache_n_stores_again;
39437 -extern atomic_t fscache_n_stores_nobufs;
39438 -extern atomic_t fscache_n_stores_oom;
39439 -extern atomic_t fscache_n_store_ops;
39440 -extern atomic_t fscache_n_store_calls;
39441 -extern atomic_t fscache_n_store_pages;
39442 -extern atomic_t fscache_n_store_radix_deletes;
39443 -extern atomic_t fscache_n_store_pages_over_limit;
39444 -
39445 -extern atomic_t fscache_n_store_vmscan_not_storing;
39446 -extern atomic_t fscache_n_store_vmscan_gone;
39447 -extern atomic_t fscache_n_store_vmscan_busy;
39448 -extern atomic_t fscache_n_store_vmscan_cancelled;
39449 -
39450 -extern atomic_t fscache_n_marks;
39451 -extern atomic_t fscache_n_uncaches;
39452 -
39453 -extern atomic_t fscache_n_acquires;
39454 -extern atomic_t fscache_n_acquires_null;
39455 -extern atomic_t fscache_n_acquires_no_cache;
39456 -extern atomic_t fscache_n_acquires_ok;
39457 -extern atomic_t fscache_n_acquires_nobufs;
39458 -extern atomic_t fscache_n_acquires_oom;
39459 -
39460 -extern atomic_t fscache_n_updates;
39461 -extern atomic_t fscache_n_updates_null;
39462 -extern atomic_t fscache_n_updates_run;
39463 -
39464 -extern atomic_t fscache_n_relinquishes;
39465 -extern atomic_t fscache_n_relinquishes_null;
39466 -extern atomic_t fscache_n_relinquishes_waitcrt;
39467 -extern atomic_t fscache_n_relinquishes_retire;
39468 -
39469 -extern atomic_t fscache_n_cookie_index;
39470 -extern atomic_t fscache_n_cookie_data;
39471 -extern atomic_t fscache_n_cookie_special;
39472 -
39473 -extern atomic_t fscache_n_object_alloc;
39474 -extern atomic_t fscache_n_object_no_alloc;
39475 -extern atomic_t fscache_n_object_lookups;
39476 -extern atomic_t fscache_n_object_lookups_negative;
39477 -extern atomic_t fscache_n_object_lookups_positive;
39478 -extern atomic_t fscache_n_object_lookups_timed_out;
39479 -extern atomic_t fscache_n_object_created;
39480 -extern atomic_t fscache_n_object_avail;
39481 -extern atomic_t fscache_n_object_dead;
39482 -
39483 -extern atomic_t fscache_n_checkaux_none;
39484 -extern atomic_t fscache_n_checkaux_okay;
39485 -extern atomic_t fscache_n_checkaux_update;
39486 -extern atomic_t fscache_n_checkaux_obsolete;
39487 +extern atomic_unchecked_t fscache_n_op_pend;
39488 +extern atomic_unchecked_t fscache_n_op_run;
39489 +extern atomic_unchecked_t fscache_n_op_enqueue;
39490 +extern atomic_unchecked_t fscache_n_op_deferred_release;
39491 +extern atomic_unchecked_t fscache_n_op_release;
39492 +extern atomic_unchecked_t fscache_n_op_gc;
39493 +extern atomic_unchecked_t fscache_n_op_cancelled;
39494 +extern atomic_unchecked_t fscache_n_op_rejected;
39495 +
39496 +extern atomic_unchecked_t fscache_n_attr_changed;
39497 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
39498 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
39499 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
39500 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
39501 +
39502 +extern atomic_unchecked_t fscache_n_allocs;
39503 +extern atomic_unchecked_t fscache_n_allocs_ok;
39504 +extern atomic_unchecked_t fscache_n_allocs_wait;
39505 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
39506 +extern atomic_unchecked_t fscache_n_allocs_intr;
39507 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
39508 +extern atomic_unchecked_t fscache_n_alloc_ops;
39509 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
39510 +
39511 +extern atomic_unchecked_t fscache_n_retrievals;
39512 +extern atomic_unchecked_t fscache_n_retrievals_ok;
39513 +extern atomic_unchecked_t fscache_n_retrievals_wait;
39514 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
39515 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
39516 +extern atomic_unchecked_t fscache_n_retrievals_intr;
39517 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
39518 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
39519 +extern atomic_unchecked_t fscache_n_retrieval_ops;
39520 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
39521 +
39522 +extern atomic_unchecked_t fscache_n_stores;
39523 +extern atomic_unchecked_t fscache_n_stores_ok;
39524 +extern atomic_unchecked_t fscache_n_stores_again;
39525 +extern atomic_unchecked_t fscache_n_stores_nobufs;
39526 +extern atomic_unchecked_t fscache_n_stores_oom;
39527 +extern atomic_unchecked_t fscache_n_store_ops;
39528 +extern atomic_unchecked_t fscache_n_store_calls;
39529 +extern atomic_unchecked_t fscache_n_store_pages;
39530 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
39531 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
39532 +
39533 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39534 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
39535 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
39536 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39537 +
39538 +extern atomic_unchecked_t fscache_n_marks;
39539 +extern atomic_unchecked_t fscache_n_uncaches;
39540 +
39541 +extern atomic_unchecked_t fscache_n_acquires;
39542 +extern atomic_unchecked_t fscache_n_acquires_null;
39543 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
39544 +extern atomic_unchecked_t fscache_n_acquires_ok;
39545 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
39546 +extern atomic_unchecked_t fscache_n_acquires_oom;
39547 +
39548 +extern atomic_unchecked_t fscache_n_updates;
39549 +extern atomic_unchecked_t fscache_n_updates_null;
39550 +extern atomic_unchecked_t fscache_n_updates_run;
39551 +
39552 +extern atomic_unchecked_t fscache_n_relinquishes;
39553 +extern atomic_unchecked_t fscache_n_relinquishes_null;
39554 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39555 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
39556 +
39557 +extern atomic_unchecked_t fscache_n_cookie_index;
39558 +extern atomic_unchecked_t fscache_n_cookie_data;
39559 +extern atomic_unchecked_t fscache_n_cookie_special;
39560 +
39561 +extern atomic_unchecked_t fscache_n_object_alloc;
39562 +extern atomic_unchecked_t fscache_n_object_no_alloc;
39563 +extern atomic_unchecked_t fscache_n_object_lookups;
39564 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
39565 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
39566 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
39567 +extern atomic_unchecked_t fscache_n_object_created;
39568 +extern atomic_unchecked_t fscache_n_object_avail;
39569 +extern atomic_unchecked_t fscache_n_object_dead;
39570 +
39571 +extern atomic_unchecked_t fscache_n_checkaux_none;
39572 +extern atomic_unchecked_t fscache_n_checkaux_okay;
39573 +extern atomic_unchecked_t fscache_n_checkaux_update;
39574 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
39575
39576 extern atomic_t fscache_n_cop_alloc_object;
39577 extern atomic_t fscache_n_cop_lookup_object;
39578 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
39579 atomic_inc(stat);
39580 }
39581
39582 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
39583 +{
39584 + atomic_inc_unchecked(stat);
39585 +}
39586 +
39587 static inline void fscache_stat_d(atomic_t *stat)
39588 {
39589 atomic_dec(stat);
39590 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
39591
39592 #define __fscache_stat(stat) (NULL)
39593 #define fscache_stat(stat) do {} while (0)
39594 +#define fscache_stat_unchecked(stat) do {} while (0)
39595 #define fscache_stat_d(stat) do {} while (0)
39596 #endif
39597
39598 diff -urNp linux-3.0.4/fs/fscache/object.c linux-3.0.4/fs/fscache/object.c
39599 --- linux-3.0.4/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
39600 +++ linux-3.0.4/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
39601 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
39602 /* update the object metadata on disk */
39603 case FSCACHE_OBJECT_UPDATING:
39604 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
39605 - fscache_stat(&fscache_n_updates_run);
39606 + fscache_stat_unchecked(&fscache_n_updates_run);
39607 fscache_stat(&fscache_n_cop_update_object);
39608 object->cache->ops->update_object(object);
39609 fscache_stat_d(&fscache_n_cop_update_object);
39610 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
39611 spin_lock(&object->lock);
39612 object->state = FSCACHE_OBJECT_DEAD;
39613 spin_unlock(&object->lock);
39614 - fscache_stat(&fscache_n_object_dead);
39615 + fscache_stat_unchecked(&fscache_n_object_dead);
39616 goto terminal_transit;
39617
39618 /* handle the parent cache of this object being withdrawn from
39619 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
39620 spin_lock(&object->lock);
39621 object->state = FSCACHE_OBJECT_DEAD;
39622 spin_unlock(&object->lock);
39623 - fscache_stat(&fscache_n_object_dead);
39624 + fscache_stat_unchecked(&fscache_n_object_dead);
39625 goto terminal_transit;
39626
39627 /* complain about the object being woken up once it is
39628 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
39629 parent->cookie->def->name, cookie->def->name,
39630 object->cache->tag->name);
39631
39632 - fscache_stat(&fscache_n_object_lookups);
39633 + fscache_stat_unchecked(&fscache_n_object_lookups);
39634 fscache_stat(&fscache_n_cop_lookup_object);
39635 ret = object->cache->ops->lookup_object(object);
39636 fscache_stat_d(&fscache_n_cop_lookup_object);
39637 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
39638 if (ret == -ETIMEDOUT) {
39639 /* probably stuck behind another object, so move this one to
39640 * the back of the queue */
39641 - fscache_stat(&fscache_n_object_lookups_timed_out);
39642 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
39643 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39644 }
39645
39646 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
39647
39648 spin_lock(&object->lock);
39649 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39650 - fscache_stat(&fscache_n_object_lookups_negative);
39651 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
39652
39653 /* transit here to allow write requests to begin stacking up
39654 * and read requests to begin returning ENODATA */
39655 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
39656 * result, in which case there may be data available */
39657 spin_lock(&object->lock);
39658 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39659 - fscache_stat(&fscache_n_object_lookups_positive);
39660 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
39661
39662 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
39663
39664 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
39665 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39666 } else {
39667 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
39668 - fscache_stat(&fscache_n_object_created);
39669 + fscache_stat_unchecked(&fscache_n_object_created);
39670
39671 object->state = FSCACHE_OBJECT_AVAILABLE;
39672 spin_unlock(&object->lock);
39673 @@ -602,7 +602,7 @@ static void fscache_object_available(str
39674 fscache_enqueue_dependents(object);
39675
39676 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
39677 - fscache_stat(&fscache_n_object_avail);
39678 + fscache_stat_unchecked(&fscache_n_object_avail);
39679
39680 _leave("");
39681 }
39682 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
39683 enum fscache_checkaux result;
39684
39685 if (!object->cookie->def->check_aux) {
39686 - fscache_stat(&fscache_n_checkaux_none);
39687 + fscache_stat_unchecked(&fscache_n_checkaux_none);
39688 return FSCACHE_CHECKAUX_OKAY;
39689 }
39690
39691 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
39692 switch (result) {
39693 /* entry okay as is */
39694 case FSCACHE_CHECKAUX_OKAY:
39695 - fscache_stat(&fscache_n_checkaux_okay);
39696 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
39697 break;
39698
39699 /* entry requires update */
39700 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
39701 - fscache_stat(&fscache_n_checkaux_update);
39702 + fscache_stat_unchecked(&fscache_n_checkaux_update);
39703 break;
39704
39705 /* entry requires deletion */
39706 case FSCACHE_CHECKAUX_OBSOLETE:
39707 - fscache_stat(&fscache_n_checkaux_obsolete);
39708 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
39709 break;
39710
39711 default:
39712 diff -urNp linux-3.0.4/fs/fscache/operation.c linux-3.0.4/fs/fscache/operation.c
39713 --- linux-3.0.4/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
39714 +++ linux-3.0.4/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
39715 @@ -17,7 +17,7 @@
39716 #include <linux/slab.h>
39717 #include "internal.h"
39718
39719 -atomic_t fscache_op_debug_id;
39720 +atomic_unchecked_t fscache_op_debug_id;
39721 EXPORT_SYMBOL(fscache_op_debug_id);
39722
39723 /**
39724 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
39725 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
39726 ASSERTCMP(atomic_read(&op->usage), >, 0);
39727
39728 - fscache_stat(&fscache_n_op_enqueue);
39729 + fscache_stat_unchecked(&fscache_n_op_enqueue);
39730 switch (op->flags & FSCACHE_OP_TYPE) {
39731 case FSCACHE_OP_ASYNC:
39732 _debug("queue async");
39733 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
39734 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
39735 if (op->processor)
39736 fscache_enqueue_operation(op);
39737 - fscache_stat(&fscache_n_op_run);
39738 + fscache_stat_unchecked(&fscache_n_op_run);
39739 }
39740
39741 /*
39742 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
39743 if (object->n_ops > 1) {
39744 atomic_inc(&op->usage);
39745 list_add_tail(&op->pend_link, &object->pending_ops);
39746 - fscache_stat(&fscache_n_op_pend);
39747 + fscache_stat_unchecked(&fscache_n_op_pend);
39748 } else if (!list_empty(&object->pending_ops)) {
39749 atomic_inc(&op->usage);
39750 list_add_tail(&op->pend_link, &object->pending_ops);
39751 - fscache_stat(&fscache_n_op_pend);
39752 + fscache_stat_unchecked(&fscache_n_op_pend);
39753 fscache_start_operations(object);
39754 } else {
39755 ASSERTCMP(object->n_in_progress, ==, 0);
39756 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
39757 object->n_exclusive++; /* reads and writes must wait */
39758 atomic_inc(&op->usage);
39759 list_add_tail(&op->pend_link, &object->pending_ops);
39760 - fscache_stat(&fscache_n_op_pend);
39761 + fscache_stat_unchecked(&fscache_n_op_pend);
39762 ret = 0;
39763 } else {
39764 /* not allowed to submit ops in any other state */
39765 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
39766 if (object->n_exclusive > 0) {
39767 atomic_inc(&op->usage);
39768 list_add_tail(&op->pend_link, &object->pending_ops);
39769 - fscache_stat(&fscache_n_op_pend);
39770 + fscache_stat_unchecked(&fscache_n_op_pend);
39771 } else if (!list_empty(&object->pending_ops)) {
39772 atomic_inc(&op->usage);
39773 list_add_tail(&op->pend_link, &object->pending_ops);
39774 - fscache_stat(&fscache_n_op_pend);
39775 + fscache_stat_unchecked(&fscache_n_op_pend);
39776 fscache_start_operations(object);
39777 } else {
39778 ASSERTCMP(object->n_exclusive, ==, 0);
39779 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
39780 object->n_ops++;
39781 atomic_inc(&op->usage);
39782 list_add_tail(&op->pend_link, &object->pending_ops);
39783 - fscache_stat(&fscache_n_op_pend);
39784 + fscache_stat_unchecked(&fscache_n_op_pend);
39785 ret = 0;
39786 } else if (object->state == FSCACHE_OBJECT_DYING ||
39787 object->state == FSCACHE_OBJECT_LC_DYING ||
39788 object->state == FSCACHE_OBJECT_WITHDRAWING) {
39789 - fscache_stat(&fscache_n_op_rejected);
39790 + fscache_stat_unchecked(&fscache_n_op_rejected);
39791 ret = -ENOBUFS;
39792 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
39793 fscache_report_unexpected_submission(object, op, ostate);
39794 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
39795
39796 ret = -EBUSY;
39797 if (!list_empty(&op->pend_link)) {
39798 - fscache_stat(&fscache_n_op_cancelled);
39799 + fscache_stat_unchecked(&fscache_n_op_cancelled);
39800 list_del_init(&op->pend_link);
39801 object->n_ops--;
39802 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
39803 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
39804 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
39805 BUG();
39806
39807 - fscache_stat(&fscache_n_op_release);
39808 + fscache_stat_unchecked(&fscache_n_op_release);
39809
39810 if (op->release) {
39811 op->release(op);
39812 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
39813 * lock, and defer it otherwise */
39814 if (!spin_trylock(&object->lock)) {
39815 _debug("defer put");
39816 - fscache_stat(&fscache_n_op_deferred_release);
39817 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
39818
39819 cache = object->cache;
39820 spin_lock(&cache->op_gc_list_lock);
39821 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
39822
39823 _debug("GC DEFERRED REL OBJ%x OP%x",
39824 object->debug_id, op->debug_id);
39825 - fscache_stat(&fscache_n_op_gc);
39826 + fscache_stat_unchecked(&fscache_n_op_gc);
39827
39828 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39829
39830 diff -urNp linux-3.0.4/fs/fscache/page.c linux-3.0.4/fs/fscache/page.c
39831 --- linux-3.0.4/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
39832 +++ linux-3.0.4/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
39833 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
39834 val = radix_tree_lookup(&cookie->stores, page->index);
39835 if (!val) {
39836 rcu_read_unlock();
39837 - fscache_stat(&fscache_n_store_vmscan_not_storing);
39838 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39839 __fscache_uncache_page(cookie, page);
39840 return true;
39841 }
39842 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
39843 spin_unlock(&cookie->stores_lock);
39844
39845 if (xpage) {
39846 - fscache_stat(&fscache_n_store_vmscan_cancelled);
39847 - fscache_stat(&fscache_n_store_radix_deletes);
39848 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39849 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39850 ASSERTCMP(xpage, ==, page);
39851 } else {
39852 - fscache_stat(&fscache_n_store_vmscan_gone);
39853 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39854 }
39855
39856 wake_up_bit(&cookie->flags, 0);
39857 @@ -107,7 +107,7 @@ page_busy:
39858 /* we might want to wait here, but that could deadlock the allocator as
39859 * the work threads writing to the cache may all end up sleeping
39860 * on memory allocation */
39861 - fscache_stat(&fscache_n_store_vmscan_busy);
39862 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39863 return false;
39864 }
39865 EXPORT_SYMBOL(__fscache_maybe_release_page);
39866 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
39867 FSCACHE_COOKIE_STORING_TAG);
39868 if (!radix_tree_tag_get(&cookie->stores, page->index,
39869 FSCACHE_COOKIE_PENDING_TAG)) {
39870 - fscache_stat(&fscache_n_store_radix_deletes);
39871 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39872 xpage = radix_tree_delete(&cookie->stores, page->index);
39873 }
39874 spin_unlock(&cookie->stores_lock);
39875 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
39876
39877 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39878
39879 - fscache_stat(&fscache_n_attr_changed_calls);
39880 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39881
39882 if (fscache_object_is_active(object)) {
39883 fscache_stat(&fscache_n_cop_attr_changed);
39884 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
39885
39886 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39887
39888 - fscache_stat(&fscache_n_attr_changed);
39889 + fscache_stat_unchecked(&fscache_n_attr_changed);
39890
39891 op = kzalloc(sizeof(*op), GFP_KERNEL);
39892 if (!op) {
39893 - fscache_stat(&fscache_n_attr_changed_nomem);
39894 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39895 _leave(" = -ENOMEM");
39896 return -ENOMEM;
39897 }
39898 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
39899 if (fscache_submit_exclusive_op(object, op) < 0)
39900 goto nobufs;
39901 spin_unlock(&cookie->lock);
39902 - fscache_stat(&fscache_n_attr_changed_ok);
39903 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39904 fscache_put_operation(op);
39905 _leave(" = 0");
39906 return 0;
39907 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
39908 nobufs:
39909 spin_unlock(&cookie->lock);
39910 kfree(op);
39911 - fscache_stat(&fscache_n_attr_changed_nobufs);
39912 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39913 _leave(" = %d", -ENOBUFS);
39914 return -ENOBUFS;
39915 }
39916 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
39917 /* allocate a retrieval operation and attempt to submit it */
39918 op = kzalloc(sizeof(*op), GFP_NOIO);
39919 if (!op) {
39920 - fscache_stat(&fscache_n_retrievals_nomem);
39921 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39922 return NULL;
39923 }
39924
39925 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
39926 return 0;
39927 }
39928
39929 - fscache_stat(&fscache_n_retrievals_wait);
39930 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
39931
39932 jif = jiffies;
39933 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39934 fscache_wait_bit_interruptible,
39935 TASK_INTERRUPTIBLE) != 0) {
39936 - fscache_stat(&fscache_n_retrievals_intr);
39937 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39938 _leave(" = -ERESTARTSYS");
39939 return -ERESTARTSYS;
39940 }
39941 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
39942 */
39943 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39944 struct fscache_retrieval *op,
39945 - atomic_t *stat_op_waits,
39946 - atomic_t *stat_object_dead)
39947 + atomic_unchecked_t *stat_op_waits,
39948 + atomic_unchecked_t *stat_object_dead)
39949 {
39950 int ret;
39951
39952 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
39953 goto check_if_dead;
39954
39955 _debug(">>> WT");
39956 - fscache_stat(stat_op_waits);
39957 + fscache_stat_unchecked(stat_op_waits);
39958 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39959 fscache_wait_bit_interruptible,
39960 TASK_INTERRUPTIBLE) < 0) {
39961 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
39962
39963 check_if_dead:
39964 if (unlikely(fscache_object_is_dead(object))) {
39965 - fscache_stat(stat_object_dead);
39966 + fscache_stat_unchecked(stat_object_dead);
39967 return -ENOBUFS;
39968 }
39969 return 0;
39970 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
39971
39972 _enter("%p,%p,,,", cookie, page);
39973
39974 - fscache_stat(&fscache_n_retrievals);
39975 + fscache_stat_unchecked(&fscache_n_retrievals);
39976
39977 if (hlist_empty(&cookie->backing_objects))
39978 goto nobufs;
39979 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
39980 goto nobufs_unlock;
39981 spin_unlock(&cookie->lock);
39982
39983 - fscache_stat(&fscache_n_retrieval_ops);
39984 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39985
39986 /* pin the netfs read context in case we need to do the actual netfs
39987 * read because we've encountered a cache read failure */
39988 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
39989
39990 error:
39991 if (ret == -ENOMEM)
39992 - fscache_stat(&fscache_n_retrievals_nomem);
39993 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39994 else if (ret == -ERESTARTSYS)
39995 - fscache_stat(&fscache_n_retrievals_intr);
39996 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39997 else if (ret == -ENODATA)
39998 - fscache_stat(&fscache_n_retrievals_nodata);
39999 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40000 else if (ret < 0)
40001 - fscache_stat(&fscache_n_retrievals_nobufs);
40002 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40003 else
40004 - fscache_stat(&fscache_n_retrievals_ok);
40005 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
40006
40007 fscache_put_retrieval(op);
40008 _leave(" = %d", ret);
40009 @@ -429,7 +429,7 @@ nobufs_unlock:
40010 spin_unlock(&cookie->lock);
40011 kfree(op);
40012 nobufs:
40013 - fscache_stat(&fscache_n_retrievals_nobufs);
40014 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40015 _leave(" = -ENOBUFS");
40016 return -ENOBUFS;
40017 }
40018 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
40019
40020 _enter("%p,,%d,,,", cookie, *nr_pages);
40021
40022 - fscache_stat(&fscache_n_retrievals);
40023 + fscache_stat_unchecked(&fscache_n_retrievals);
40024
40025 if (hlist_empty(&cookie->backing_objects))
40026 goto nobufs;
40027 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
40028 goto nobufs_unlock;
40029 spin_unlock(&cookie->lock);
40030
40031 - fscache_stat(&fscache_n_retrieval_ops);
40032 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
40033
40034 /* pin the netfs read context in case we need to do the actual netfs
40035 * read because we've encountered a cache read failure */
40036 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
40037
40038 error:
40039 if (ret == -ENOMEM)
40040 - fscache_stat(&fscache_n_retrievals_nomem);
40041 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40042 else if (ret == -ERESTARTSYS)
40043 - fscache_stat(&fscache_n_retrievals_intr);
40044 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
40045 else if (ret == -ENODATA)
40046 - fscache_stat(&fscache_n_retrievals_nodata);
40047 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40048 else if (ret < 0)
40049 - fscache_stat(&fscache_n_retrievals_nobufs);
40050 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40051 else
40052 - fscache_stat(&fscache_n_retrievals_ok);
40053 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
40054
40055 fscache_put_retrieval(op);
40056 _leave(" = %d", ret);
40057 @@ -545,7 +545,7 @@ nobufs_unlock:
40058 spin_unlock(&cookie->lock);
40059 kfree(op);
40060 nobufs:
40061 - fscache_stat(&fscache_n_retrievals_nobufs);
40062 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40063 _leave(" = -ENOBUFS");
40064 return -ENOBUFS;
40065 }
40066 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
40067
40068 _enter("%p,%p,,,", cookie, page);
40069
40070 - fscache_stat(&fscache_n_allocs);
40071 + fscache_stat_unchecked(&fscache_n_allocs);
40072
40073 if (hlist_empty(&cookie->backing_objects))
40074 goto nobufs;
40075 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
40076 goto nobufs_unlock;
40077 spin_unlock(&cookie->lock);
40078
40079 - fscache_stat(&fscache_n_alloc_ops);
40080 + fscache_stat_unchecked(&fscache_n_alloc_ops);
40081
40082 ret = fscache_wait_for_retrieval_activation(
40083 object, op,
40084 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
40085
40086 error:
40087 if (ret == -ERESTARTSYS)
40088 - fscache_stat(&fscache_n_allocs_intr);
40089 + fscache_stat_unchecked(&fscache_n_allocs_intr);
40090 else if (ret < 0)
40091 - fscache_stat(&fscache_n_allocs_nobufs);
40092 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40093 else
40094 - fscache_stat(&fscache_n_allocs_ok);
40095 + fscache_stat_unchecked(&fscache_n_allocs_ok);
40096
40097 fscache_put_retrieval(op);
40098 _leave(" = %d", ret);
40099 @@ -625,7 +625,7 @@ nobufs_unlock:
40100 spin_unlock(&cookie->lock);
40101 kfree(op);
40102 nobufs:
40103 - fscache_stat(&fscache_n_allocs_nobufs);
40104 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40105 _leave(" = -ENOBUFS");
40106 return -ENOBUFS;
40107 }
40108 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
40109
40110 spin_lock(&cookie->stores_lock);
40111
40112 - fscache_stat(&fscache_n_store_calls);
40113 + fscache_stat_unchecked(&fscache_n_store_calls);
40114
40115 /* find a page to store */
40116 page = NULL;
40117 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
40118 page = results[0];
40119 _debug("gang %d [%lx]", n, page->index);
40120 if (page->index > op->store_limit) {
40121 - fscache_stat(&fscache_n_store_pages_over_limit);
40122 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
40123 goto superseded;
40124 }
40125
40126 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
40127 spin_unlock(&cookie->stores_lock);
40128 spin_unlock(&object->lock);
40129
40130 - fscache_stat(&fscache_n_store_pages);
40131 + fscache_stat_unchecked(&fscache_n_store_pages);
40132 fscache_stat(&fscache_n_cop_write_page);
40133 ret = object->cache->ops->write_page(op, page);
40134 fscache_stat_d(&fscache_n_cop_write_page);
40135 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
40136 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40137 ASSERT(PageFsCache(page));
40138
40139 - fscache_stat(&fscache_n_stores);
40140 + fscache_stat_unchecked(&fscache_n_stores);
40141
40142 op = kzalloc(sizeof(*op), GFP_NOIO);
40143 if (!op)
40144 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
40145 spin_unlock(&cookie->stores_lock);
40146 spin_unlock(&object->lock);
40147
40148 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
40149 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
40150 op->store_limit = object->store_limit;
40151
40152 if (fscache_submit_op(object, &op->op) < 0)
40153 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
40154
40155 spin_unlock(&cookie->lock);
40156 radix_tree_preload_end();
40157 - fscache_stat(&fscache_n_store_ops);
40158 - fscache_stat(&fscache_n_stores_ok);
40159 + fscache_stat_unchecked(&fscache_n_store_ops);
40160 + fscache_stat_unchecked(&fscache_n_stores_ok);
40161
40162 /* the work queue now carries its own ref on the object */
40163 fscache_put_operation(&op->op);
40164 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
40165 return 0;
40166
40167 already_queued:
40168 - fscache_stat(&fscache_n_stores_again);
40169 + fscache_stat_unchecked(&fscache_n_stores_again);
40170 already_pending:
40171 spin_unlock(&cookie->stores_lock);
40172 spin_unlock(&object->lock);
40173 spin_unlock(&cookie->lock);
40174 radix_tree_preload_end();
40175 kfree(op);
40176 - fscache_stat(&fscache_n_stores_ok);
40177 + fscache_stat_unchecked(&fscache_n_stores_ok);
40178 _leave(" = 0");
40179 return 0;
40180
40181 @@ -851,14 +851,14 @@ nobufs:
40182 spin_unlock(&cookie->lock);
40183 radix_tree_preload_end();
40184 kfree(op);
40185 - fscache_stat(&fscache_n_stores_nobufs);
40186 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
40187 _leave(" = -ENOBUFS");
40188 return -ENOBUFS;
40189
40190 nomem_free:
40191 kfree(op);
40192 nomem:
40193 - fscache_stat(&fscache_n_stores_oom);
40194 + fscache_stat_unchecked(&fscache_n_stores_oom);
40195 _leave(" = -ENOMEM");
40196 return -ENOMEM;
40197 }
40198 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
40199 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40200 ASSERTCMP(page, !=, NULL);
40201
40202 - fscache_stat(&fscache_n_uncaches);
40203 + fscache_stat_unchecked(&fscache_n_uncaches);
40204
40205 /* cache withdrawal may beat us to it */
40206 if (!PageFsCache(page))
40207 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
40208 unsigned long loop;
40209
40210 #ifdef CONFIG_FSCACHE_STATS
40211 - atomic_add(pagevec->nr, &fscache_n_marks);
40212 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
40213 #endif
40214
40215 for (loop = 0; loop < pagevec->nr; loop++) {
40216 diff -urNp linux-3.0.4/fs/fscache/stats.c linux-3.0.4/fs/fscache/stats.c
40217 --- linux-3.0.4/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
40218 +++ linux-3.0.4/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
40219 @@ -18,95 +18,95 @@
40220 /*
40221 * operation counters
40222 */
40223 -atomic_t fscache_n_op_pend;
40224 -atomic_t fscache_n_op_run;
40225 -atomic_t fscache_n_op_enqueue;
40226 -atomic_t fscache_n_op_requeue;
40227 -atomic_t fscache_n_op_deferred_release;
40228 -atomic_t fscache_n_op_release;
40229 -atomic_t fscache_n_op_gc;
40230 -atomic_t fscache_n_op_cancelled;
40231 -atomic_t fscache_n_op_rejected;
40232 -
40233 -atomic_t fscache_n_attr_changed;
40234 -atomic_t fscache_n_attr_changed_ok;
40235 -atomic_t fscache_n_attr_changed_nobufs;
40236 -atomic_t fscache_n_attr_changed_nomem;
40237 -atomic_t fscache_n_attr_changed_calls;
40238 -
40239 -atomic_t fscache_n_allocs;
40240 -atomic_t fscache_n_allocs_ok;
40241 -atomic_t fscache_n_allocs_wait;
40242 -atomic_t fscache_n_allocs_nobufs;
40243 -atomic_t fscache_n_allocs_intr;
40244 -atomic_t fscache_n_allocs_object_dead;
40245 -atomic_t fscache_n_alloc_ops;
40246 -atomic_t fscache_n_alloc_op_waits;
40247 -
40248 -atomic_t fscache_n_retrievals;
40249 -atomic_t fscache_n_retrievals_ok;
40250 -atomic_t fscache_n_retrievals_wait;
40251 -atomic_t fscache_n_retrievals_nodata;
40252 -atomic_t fscache_n_retrievals_nobufs;
40253 -atomic_t fscache_n_retrievals_intr;
40254 -atomic_t fscache_n_retrievals_nomem;
40255 -atomic_t fscache_n_retrievals_object_dead;
40256 -atomic_t fscache_n_retrieval_ops;
40257 -atomic_t fscache_n_retrieval_op_waits;
40258 -
40259 -atomic_t fscache_n_stores;
40260 -atomic_t fscache_n_stores_ok;
40261 -atomic_t fscache_n_stores_again;
40262 -atomic_t fscache_n_stores_nobufs;
40263 -atomic_t fscache_n_stores_oom;
40264 -atomic_t fscache_n_store_ops;
40265 -atomic_t fscache_n_store_calls;
40266 -atomic_t fscache_n_store_pages;
40267 -atomic_t fscache_n_store_radix_deletes;
40268 -atomic_t fscache_n_store_pages_over_limit;
40269 -
40270 -atomic_t fscache_n_store_vmscan_not_storing;
40271 -atomic_t fscache_n_store_vmscan_gone;
40272 -atomic_t fscache_n_store_vmscan_busy;
40273 -atomic_t fscache_n_store_vmscan_cancelled;
40274 -
40275 -atomic_t fscache_n_marks;
40276 -atomic_t fscache_n_uncaches;
40277 -
40278 -atomic_t fscache_n_acquires;
40279 -atomic_t fscache_n_acquires_null;
40280 -atomic_t fscache_n_acquires_no_cache;
40281 -atomic_t fscache_n_acquires_ok;
40282 -atomic_t fscache_n_acquires_nobufs;
40283 -atomic_t fscache_n_acquires_oom;
40284 -
40285 -atomic_t fscache_n_updates;
40286 -atomic_t fscache_n_updates_null;
40287 -atomic_t fscache_n_updates_run;
40288 -
40289 -atomic_t fscache_n_relinquishes;
40290 -atomic_t fscache_n_relinquishes_null;
40291 -atomic_t fscache_n_relinquishes_waitcrt;
40292 -atomic_t fscache_n_relinquishes_retire;
40293 -
40294 -atomic_t fscache_n_cookie_index;
40295 -atomic_t fscache_n_cookie_data;
40296 -atomic_t fscache_n_cookie_special;
40297 -
40298 -atomic_t fscache_n_object_alloc;
40299 -atomic_t fscache_n_object_no_alloc;
40300 -atomic_t fscache_n_object_lookups;
40301 -atomic_t fscache_n_object_lookups_negative;
40302 -atomic_t fscache_n_object_lookups_positive;
40303 -atomic_t fscache_n_object_lookups_timed_out;
40304 -atomic_t fscache_n_object_created;
40305 -atomic_t fscache_n_object_avail;
40306 -atomic_t fscache_n_object_dead;
40307 -
40308 -atomic_t fscache_n_checkaux_none;
40309 -atomic_t fscache_n_checkaux_okay;
40310 -atomic_t fscache_n_checkaux_update;
40311 -atomic_t fscache_n_checkaux_obsolete;
40312 +atomic_unchecked_t fscache_n_op_pend;
40313 +atomic_unchecked_t fscache_n_op_run;
40314 +atomic_unchecked_t fscache_n_op_enqueue;
40315 +atomic_unchecked_t fscache_n_op_requeue;
40316 +atomic_unchecked_t fscache_n_op_deferred_release;
40317 +atomic_unchecked_t fscache_n_op_release;
40318 +atomic_unchecked_t fscache_n_op_gc;
40319 +atomic_unchecked_t fscache_n_op_cancelled;
40320 +atomic_unchecked_t fscache_n_op_rejected;
40321 +
40322 +atomic_unchecked_t fscache_n_attr_changed;
40323 +atomic_unchecked_t fscache_n_attr_changed_ok;
40324 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
40325 +atomic_unchecked_t fscache_n_attr_changed_nomem;
40326 +atomic_unchecked_t fscache_n_attr_changed_calls;
40327 +
40328 +atomic_unchecked_t fscache_n_allocs;
40329 +atomic_unchecked_t fscache_n_allocs_ok;
40330 +atomic_unchecked_t fscache_n_allocs_wait;
40331 +atomic_unchecked_t fscache_n_allocs_nobufs;
40332 +atomic_unchecked_t fscache_n_allocs_intr;
40333 +atomic_unchecked_t fscache_n_allocs_object_dead;
40334 +atomic_unchecked_t fscache_n_alloc_ops;
40335 +atomic_unchecked_t fscache_n_alloc_op_waits;
40336 +
40337 +atomic_unchecked_t fscache_n_retrievals;
40338 +atomic_unchecked_t fscache_n_retrievals_ok;
40339 +atomic_unchecked_t fscache_n_retrievals_wait;
40340 +atomic_unchecked_t fscache_n_retrievals_nodata;
40341 +atomic_unchecked_t fscache_n_retrievals_nobufs;
40342 +atomic_unchecked_t fscache_n_retrievals_intr;
40343 +atomic_unchecked_t fscache_n_retrievals_nomem;
40344 +atomic_unchecked_t fscache_n_retrievals_object_dead;
40345 +atomic_unchecked_t fscache_n_retrieval_ops;
40346 +atomic_unchecked_t fscache_n_retrieval_op_waits;
40347 +
40348 +atomic_unchecked_t fscache_n_stores;
40349 +atomic_unchecked_t fscache_n_stores_ok;
40350 +atomic_unchecked_t fscache_n_stores_again;
40351 +atomic_unchecked_t fscache_n_stores_nobufs;
40352 +atomic_unchecked_t fscache_n_stores_oom;
40353 +atomic_unchecked_t fscache_n_store_ops;
40354 +atomic_unchecked_t fscache_n_store_calls;
40355 +atomic_unchecked_t fscache_n_store_pages;
40356 +atomic_unchecked_t fscache_n_store_radix_deletes;
40357 +atomic_unchecked_t fscache_n_store_pages_over_limit;
40358 +
40359 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
40360 +atomic_unchecked_t fscache_n_store_vmscan_gone;
40361 +atomic_unchecked_t fscache_n_store_vmscan_busy;
40362 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
40363 +
40364 +atomic_unchecked_t fscache_n_marks;
40365 +atomic_unchecked_t fscache_n_uncaches;
40366 +
40367 +atomic_unchecked_t fscache_n_acquires;
40368 +atomic_unchecked_t fscache_n_acquires_null;
40369 +atomic_unchecked_t fscache_n_acquires_no_cache;
40370 +atomic_unchecked_t fscache_n_acquires_ok;
40371 +atomic_unchecked_t fscache_n_acquires_nobufs;
40372 +atomic_unchecked_t fscache_n_acquires_oom;
40373 +
40374 +atomic_unchecked_t fscache_n_updates;
40375 +atomic_unchecked_t fscache_n_updates_null;
40376 +atomic_unchecked_t fscache_n_updates_run;
40377 +
40378 +atomic_unchecked_t fscache_n_relinquishes;
40379 +atomic_unchecked_t fscache_n_relinquishes_null;
40380 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
40381 +atomic_unchecked_t fscache_n_relinquishes_retire;
40382 +
40383 +atomic_unchecked_t fscache_n_cookie_index;
40384 +atomic_unchecked_t fscache_n_cookie_data;
40385 +atomic_unchecked_t fscache_n_cookie_special;
40386 +
40387 +atomic_unchecked_t fscache_n_object_alloc;
40388 +atomic_unchecked_t fscache_n_object_no_alloc;
40389 +atomic_unchecked_t fscache_n_object_lookups;
40390 +atomic_unchecked_t fscache_n_object_lookups_negative;
40391 +atomic_unchecked_t fscache_n_object_lookups_positive;
40392 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
40393 +atomic_unchecked_t fscache_n_object_created;
40394 +atomic_unchecked_t fscache_n_object_avail;
40395 +atomic_unchecked_t fscache_n_object_dead;
40396 +
40397 +atomic_unchecked_t fscache_n_checkaux_none;
40398 +atomic_unchecked_t fscache_n_checkaux_okay;
40399 +atomic_unchecked_t fscache_n_checkaux_update;
40400 +atomic_unchecked_t fscache_n_checkaux_obsolete;
40401
40402 atomic_t fscache_n_cop_alloc_object;
40403 atomic_t fscache_n_cop_lookup_object;
40404 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
40405 seq_puts(m, "FS-Cache statistics\n");
40406
40407 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
40408 - atomic_read(&fscache_n_cookie_index),
40409 - atomic_read(&fscache_n_cookie_data),
40410 - atomic_read(&fscache_n_cookie_special));
40411 + atomic_read_unchecked(&fscache_n_cookie_index),
40412 + atomic_read_unchecked(&fscache_n_cookie_data),
40413 + atomic_read_unchecked(&fscache_n_cookie_special));
40414
40415 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
40416 - atomic_read(&fscache_n_object_alloc),
40417 - atomic_read(&fscache_n_object_no_alloc),
40418 - atomic_read(&fscache_n_object_avail),
40419 - atomic_read(&fscache_n_object_dead));
40420 + atomic_read_unchecked(&fscache_n_object_alloc),
40421 + atomic_read_unchecked(&fscache_n_object_no_alloc),
40422 + atomic_read_unchecked(&fscache_n_object_avail),
40423 + atomic_read_unchecked(&fscache_n_object_dead));
40424 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
40425 - atomic_read(&fscache_n_checkaux_none),
40426 - atomic_read(&fscache_n_checkaux_okay),
40427 - atomic_read(&fscache_n_checkaux_update),
40428 - atomic_read(&fscache_n_checkaux_obsolete));
40429 + atomic_read_unchecked(&fscache_n_checkaux_none),
40430 + atomic_read_unchecked(&fscache_n_checkaux_okay),
40431 + atomic_read_unchecked(&fscache_n_checkaux_update),
40432 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
40433
40434 seq_printf(m, "Pages : mrk=%u unc=%u\n",
40435 - atomic_read(&fscache_n_marks),
40436 - atomic_read(&fscache_n_uncaches));
40437 + atomic_read_unchecked(&fscache_n_marks),
40438 + atomic_read_unchecked(&fscache_n_uncaches));
40439
40440 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
40441 " oom=%u\n",
40442 - atomic_read(&fscache_n_acquires),
40443 - atomic_read(&fscache_n_acquires_null),
40444 - atomic_read(&fscache_n_acquires_no_cache),
40445 - atomic_read(&fscache_n_acquires_ok),
40446 - atomic_read(&fscache_n_acquires_nobufs),
40447 - atomic_read(&fscache_n_acquires_oom));
40448 + atomic_read_unchecked(&fscache_n_acquires),
40449 + atomic_read_unchecked(&fscache_n_acquires_null),
40450 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
40451 + atomic_read_unchecked(&fscache_n_acquires_ok),
40452 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
40453 + atomic_read_unchecked(&fscache_n_acquires_oom));
40454
40455 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
40456 - atomic_read(&fscache_n_object_lookups),
40457 - atomic_read(&fscache_n_object_lookups_negative),
40458 - atomic_read(&fscache_n_object_lookups_positive),
40459 - atomic_read(&fscache_n_object_created),
40460 - atomic_read(&fscache_n_object_lookups_timed_out));
40461 + atomic_read_unchecked(&fscache_n_object_lookups),
40462 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
40463 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
40464 + atomic_read_unchecked(&fscache_n_object_created),
40465 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
40466
40467 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
40468 - atomic_read(&fscache_n_updates),
40469 - atomic_read(&fscache_n_updates_null),
40470 - atomic_read(&fscache_n_updates_run));
40471 + atomic_read_unchecked(&fscache_n_updates),
40472 + atomic_read_unchecked(&fscache_n_updates_null),
40473 + atomic_read_unchecked(&fscache_n_updates_run));
40474
40475 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
40476 - atomic_read(&fscache_n_relinquishes),
40477 - atomic_read(&fscache_n_relinquishes_null),
40478 - atomic_read(&fscache_n_relinquishes_waitcrt),
40479 - atomic_read(&fscache_n_relinquishes_retire));
40480 + atomic_read_unchecked(&fscache_n_relinquishes),
40481 + atomic_read_unchecked(&fscache_n_relinquishes_null),
40482 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
40483 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
40484
40485 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
40486 - atomic_read(&fscache_n_attr_changed),
40487 - atomic_read(&fscache_n_attr_changed_ok),
40488 - atomic_read(&fscache_n_attr_changed_nobufs),
40489 - atomic_read(&fscache_n_attr_changed_nomem),
40490 - atomic_read(&fscache_n_attr_changed_calls));
40491 + atomic_read_unchecked(&fscache_n_attr_changed),
40492 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
40493 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
40494 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
40495 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
40496
40497 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
40498 - atomic_read(&fscache_n_allocs),
40499 - atomic_read(&fscache_n_allocs_ok),
40500 - atomic_read(&fscache_n_allocs_wait),
40501 - atomic_read(&fscache_n_allocs_nobufs),
40502 - atomic_read(&fscache_n_allocs_intr));
40503 + atomic_read_unchecked(&fscache_n_allocs),
40504 + atomic_read_unchecked(&fscache_n_allocs_ok),
40505 + atomic_read_unchecked(&fscache_n_allocs_wait),
40506 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
40507 + atomic_read_unchecked(&fscache_n_allocs_intr));
40508 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
40509 - atomic_read(&fscache_n_alloc_ops),
40510 - atomic_read(&fscache_n_alloc_op_waits),
40511 - atomic_read(&fscache_n_allocs_object_dead));
40512 + atomic_read_unchecked(&fscache_n_alloc_ops),
40513 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
40514 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
40515
40516 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
40517 " int=%u oom=%u\n",
40518 - atomic_read(&fscache_n_retrievals),
40519 - atomic_read(&fscache_n_retrievals_ok),
40520 - atomic_read(&fscache_n_retrievals_wait),
40521 - atomic_read(&fscache_n_retrievals_nodata),
40522 - atomic_read(&fscache_n_retrievals_nobufs),
40523 - atomic_read(&fscache_n_retrievals_intr),
40524 - atomic_read(&fscache_n_retrievals_nomem));
40525 + atomic_read_unchecked(&fscache_n_retrievals),
40526 + atomic_read_unchecked(&fscache_n_retrievals_ok),
40527 + atomic_read_unchecked(&fscache_n_retrievals_wait),
40528 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
40529 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
40530 + atomic_read_unchecked(&fscache_n_retrievals_intr),
40531 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
40532 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
40533 - atomic_read(&fscache_n_retrieval_ops),
40534 - atomic_read(&fscache_n_retrieval_op_waits),
40535 - atomic_read(&fscache_n_retrievals_object_dead));
40536 + atomic_read_unchecked(&fscache_n_retrieval_ops),
40537 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
40538 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
40539
40540 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
40541 - atomic_read(&fscache_n_stores),
40542 - atomic_read(&fscache_n_stores_ok),
40543 - atomic_read(&fscache_n_stores_again),
40544 - atomic_read(&fscache_n_stores_nobufs),
40545 - atomic_read(&fscache_n_stores_oom));
40546 + atomic_read_unchecked(&fscache_n_stores),
40547 + atomic_read_unchecked(&fscache_n_stores_ok),
40548 + atomic_read_unchecked(&fscache_n_stores_again),
40549 + atomic_read_unchecked(&fscache_n_stores_nobufs),
40550 + atomic_read_unchecked(&fscache_n_stores_oom));
40551 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
40552 - atomic_read(&fscache_n_store_ops),
40553 - atomic_read(&fscache_n_store_calls),
40554 - atomic_read(&fscache_n_store_pages),
40555 - atomic_read(&fscache_n_store_radix_deletes),
40556 - atomic_read(&fscache_n_store_pages_over_limit));
40557 + atomic_read_unchecked(&fscache_n_store_ops),
40558 + atomic_read_unchecked(&fscache_n_store_calls),
40559 + atomic_read_unchecked(&fscache_n_store_pages),
40560 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
40561 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
40562
40563 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
40564 - atomic_read(&fscache_n_store_vmscan_not_storing),
40565 - atomic_read(&fscache_n_store_vmscan_gone),
40566 - atomic_read(&fscache_n_store_vmscan_busy),
40567 - atomic_read(&fscache_n_store_vmscan_cancelled));
40568 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
40569 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
40570 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
40571 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
40572
40573 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
40574 - atomic_read(&fscache_n_op_pend),
40575 - atomic_read(&fscache_n_op_run),
40576 - atomic_read(&fscache_n_op_enqueue),
40577 - atomic_read(&fscache_n_op_cancelled),
40578 - atomic_read(&fscache_n_op_rejected));
40579 + atomic_read_unchecked(&fscache_n_op_pend),
40580 + atomic_read_unchecked(&fscache_n_op_run),
40581 + atomic_read_unchecked(&fscache_n_op_enqueue),
40582 + atomic_read_unchecked(&fscache_n_op_cancelled),
40583 + atomic_read_unchecked(&fscache_n_op_rejected));
40584 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
40585 - atomic_read(&fscache_n_op_deferred_release),
40586 - atomic_read(&fscache_n_op_release),
40587 - atomic_read(&fscache_n_op_gc));
40588 + atomic_read_unchecked(&fscache_n_op_deferred_release),
40589 + atomic_read_unchecked(&fscache_n_op_release),
40590 + atomic_read_unchecked(&fscache_n_op_gc));
40591
40592 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
40593 atomic_read(&fscache_n_cop_alloc_object),
40594 diff -urNp linux-3.0.4/fs/fs_struct.c linux-3.0.4/fs/fs_struct.c
40595 --- linux-3.0.4/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
40596 +++ linux-3.0.4/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
40597 @@ -4,6 +4,7 @@
40598 #include <linux/path.h>
40599 #include <linux/slab.h>
40600 #include <linux/fs_struct.h>
40601 +#include <linux/grsecurity.h>
40602 #include "internal.h"
40603
40604 static inline void path_get_longterm(struct path *path)
40605 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
40606 old_root = fs->root;
40607 fs->root = *path;
40608 path_get_longterm(path);
40609 + gr_set_chroot_entries(current, path);
40610 write_seqcount_end(&fs->seq);
40611 spin_unlock(&fs->lock);
40612 if (old_root.dentry)
40613 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
40614 && fs->root.mnt == old_root->mnt) {
40615 path_get_longterm(new_root);
40616 fs->root = *new_root;
40617 + gr_set_chroot_entries(p, new_root);
40618 count++;
40619 }
40620 if (fs->pwd.dentry == old_root->dentry
40621 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
40622 spin_lock(&fs->lock);
40623 write_seqcount_begin(&fs->seq);
40624 tsk->fs = NULL;
40625 - kill = !--fs->users;
40626 + gr_clear_chroot_entries(tsk);
40627 + kill = !atomic_dec_return(&fs->users);
40628 write_seqcount_end(&fs->seq);
40629 spin_unlock(&fs->lock);
40630 task_unlock(tsk);
40631 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
40632 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
40633 /* We don't need to lock fs - think why ;-) */
40634 if (fs) {
40635 - fs->users = 1;
40636 + atomic_set(&fs->users, 1);
40637 fs->in_exec = 0;
40638 spin_lock_init(&fs->lock);
40639 seqcount_init(&fs->seq);
40640 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
40641 spin_lock(&old->lock);
40642 fs->root = old->root;
40643 path_get_longterm(&fs->root);
40644 + /* instead of calling gr_set_chroot_entries here,
40645 + we call it from every caller of this function
40646 + */
40647 fs->pwd = old->pwd;
40648 path_get_longterm(&fs->pwd);
40649 spin_unlock(&old->lock);
40650 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
40651
40652 task_lock(current);
40653 spin_lock(&fs->lock);
40654 - kill = !--fs->users;
40655 + kill = !atomic_dec_return(&fs->users);
40656 current->fs = new_fs;
40657 + gr_set_chroot_entries(current, &new_fs->root);
40658 spin_unlock(&fs->lock);
40659 task_unlock(current);
40660
40661 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
40662
40663 /* to be mentioned only in INIT_TASK */
40664 struct fs_struct init_fs = {
40665 - .users = 1,
40666 + .users = ATOMIC_INIT(1),
40667 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
40668 .seq = SEQCNT_ZERO,
40669 .umask = 0022,
40670 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
40671 task_lock(current);
40672
40673 spin_lock(&init_fs.lock);
40674 - init_fs.users++;
40675 + atomic_inc(&init_fs.users);
40676 spin_unlock(&init_fs.lock);
40677
40678 spin_lock(&fs->lock);
40679 current->fs = &init_fs;
40680 - kill = !--fs->users;
40681 + gr_set_chroot_entries(current, &current->fs->root);
40682 + kill = !atomic_dec_return(&fs->users);
40683 spin_unlock(&fs->lock);
40684
40685 task_unlock(current);
40686 diff -urNp linux-3.0.4/fs/fuse/cuse.c linux-3.0.4/fs/fuse/cuse.c
40687 --- linux-3.0.4/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
40688 +++ linux-3.0.4/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
40689 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
40690 INIT_LIST_HEAD(&cuse_conntbl[i]);
40691
40692 /* inherit and extend fuse_dev_operations */
40693 - cuse_channel_fops = fuse_dev_operations;
40694 - cuse_channel_fops.owner = THIS_MODULE;
40695 - cuse_channel_fops.open = cuse_channel_open;
40696 - cuse_channel_fops.release = cuse_channel_release;
40697 + pax_open_kernel();
40698 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
40699 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
40700 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
40701 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
40702 + pax_close_kernel();
40703
40704 cuse_class = class_create(THIS_MODULE, "cuse");
40705 if (IS_ERR(cuse_class))
40706 diff -urNp linux-3.0.4/fs/fuse/dev.c linux-3.0.4/fs/fuse/dev.c
40707 --- linux-3.0.4/fs/fuse/dev.c 2011-08-29 23:26:14.000000000 -0400
40708 +++ linux-3.0.4/fs/fuse/dev.c 2011-08-29 23:26:27.000000000 -0400
40709 @@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
40710 ret = 0;
40711 pipe_lock(pipe);
40712
40713 - if (!pipe->readers) {
40714 + if (!atomic_read(&pipe->readers)) {
40715 send_sig(SIGPIPE, current, 0);
40716 if (!ret)
40717 ret = -EPIPE;
40718 diff -urNp linux-3.0.4/fs/fuse/dir.c linux-3.0.4/fs/fuse/dir.c
40719 --- linux-3.0.4/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
40720 +++ linux-3.0.4/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
40721 @@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
40722 return link;
40723 }
40724
40725 -static void free_link(char *link)
40726 +static void free_link(const char *link)
40727 {
40728 if (!IS_ERR(link))
40729 free_page((unsigned long) link);
40730 diff -urNp linux-3.0.4/fs/gfs2/inode.c linux-3.0.4/fs/gfs2/inode.c
40731 --- linux-3.0.4/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
40732 +++ linux-3.0.4/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
40733 @@ -1525,7 +1525,7 @@ out:
40734
40735 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
40736 {
40737 - char *s = nd_get_link(nd);
40738 + const char *s = nd_get_link(nd);
40739 if (!IS_ERR(s))
40740 kfree(s);
40741 }
40742 diff -urNp linux-3.0.4/fs/hfsplus/catalog.c linux-3.0.4/fs/hfsplus/catalog.c
40743 --- linux-3.0.4/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
40744 +++ linux-3.0.4/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
40745 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
40746 int err;
40747 u16 type;
40748
40749 + pax_track_stack();
40750 +
40751 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40752 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40753 if (err)
40754 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
40755 int entry_size;
40756 int err;
40757
40758 + pax_track_stack();
40759 +
40760 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
40761 str->name, cnid, inode->i_nlink);
40762 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
40763 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
40764 int entry_size, type;
40765 int err = 0;
40766
40767 + pax_track_stack();
40768 +
40769 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
40770 cnid, src_dir->i_ino, src_name->name,
40771 dst_dir->i_ino, dst_name->name);
40772 diff -urNp linux-3.0.4/fs/hfsplus/dir.c linux-3.0.4/fs/hfsplus/dir.c
40773 --- linux-3.0.4/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
40774 +++ linux-3.0.4/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
40775 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
40776 struct hfsplus_readdir_data *rd;
40777 u16 type;
40778
40779 + pax_track_stack();
40780 +
40781 if (filp->f_pos >= inode->i_size)
40782 return 0;
40783
40784 diff -urNp linux-3.0.4/fs/hfsplus/inode.c linux-3.0.4/fs/hfsplus/inode.c
40785 --- linux-3.0.4/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
40786 +++ linux-3.0.4/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
40787 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
40788 int res = 0;
40789 u16 type;
40790
40791 + pax_track_stack();
40792 +
40793 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40794
40795 HFSPLUS_I(inode)->linkid = 0;
40796 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
40797 struct hfs_find_data fd;
40798 hfsplus_cat_entry entry;
40799
40800 + pax_track_stack();
40801 +
40802 if (HFSPLUS_IS_RSRC(inode))
40803 main_inode = HFSPLUS_I(inode)->rsrc_inode;
40804
40805 diff -urNp linux-3.0.4/fs/hfsplus/ioctl.c linux-3.0.4/fs/hfsplus/ioctl.c
40806 --- linux-3.0.4/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
40807 +++ linux-3.0.4/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
40808 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
40809 struct hfsplus_cat_file *file;
40810 int res;
40811
40812 + pax_track_stack();
40813 +
40814 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40815 return -EOPNOTSUPP;
40816
40817 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40818 struct hfsplus_cat_file *file;
40819 ssize_t res = 0;
40820
40821 + pax_track_stack();
40822 +
40823 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40824 return -EOPNOTSUPP;
40825
40826 diff -urNp linux-3.0.4/fs/hfsplus/super.c linux-3.0.4/fs/hfsplus/super.c
40827 --- linux-3.0.4/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
40828 +++ linux-3.0.4/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
40829 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
40830 struct nls_table *nls = NULL;
40831 int err;
40832
40833 + pax_track_stack();
40834 +
40835 err = -EINVAL;
40836 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40837 if (!sbi)
40838 diff -urNp linux-3.0.4/fs/hugetlbfs/inode.c linux-3.0.4/fs/hugetlbfs/inode.c
40839 --- linux-3.0.4/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
40840 +++ linux-3.0.4/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
40841 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
40842 .kill_sb = kill_litter_super,
40843 };
40844
40845 -static struct vfsmount *hugetlbfs_vfsmount;
40846 +struct vfsmount *hugetlbfs_vfsmount;
40847
40848 static int can_do_hugetlb_shm(void)
40849 {
40850 diff -urNp linux-3.0.4/fs/inode.c linux-3.0.4/fs/inode.c
40851 --- linux-3.0.4/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
40852 +++ linux-3.0.4/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
40853 @@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
40854
40855 #ifdef CONFIG_SMP
40856 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
40857 - static atomic_t shared_last_ino;
40858 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
40859 + static atomic_unchecked_t shared_last_ino;
40860 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
40861
40862 res = next - LAST_INO_BATCH;
40863 }
40864 diff -urNp linux-3.0.4/fs/jbd/checkpoint.c linux-3.0.4/fs/jbd/checkpoint.c
40865 --- linux-3.0.4/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
40866 +++ linux-3.0.4/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
40867 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
40868 tid_t this_tid;
40869 int result;
40870
40871 + pax_track_stack();
40872 +
40873 jbd_debug(1, "Start checkpoint\n");
40874
40875 /*
40876 diff -urNp linux-3.0.4/fs/jffs2/compr_rtime.c linux-3.0.4/fs/jffs2/compr_rtime.c
40877 --- linux-3.0.4/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
40878 +++ linux-3.0.4/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
40879 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
40880 int outpos = 0;
40881 int pos=0;
40882
40883 + pax_track_stack();
40884 +
40885 memset(positions,0,sizeof(positions));
40886
40887 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
40888 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
40889 int outpos = 0;
40890 int pos=0;
40891
40892 + pax_track_stack();
40893 +
40894 memset(positions,0,sizeof(positions));
40895
40896 while (outpos<destlen) {
40897 diff -urNp linux-3.0.4/fs/jffs2/compr_rubin.c linux-3.0.4/fs/jffs2/compr_rubin.c
40898 --- linux-3.0.4/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
40899 +++ linux-3.0.4/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
40900 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
40901 int ret;
40902 uint32_t mysrclen, mydstlen;
40903
40904 + pax_track_stack();
40905 +
40906 mysrclen = *sourcelen;
40907 mydstlen = *dstlen - 8;
40908
40909 diff -urNp linux-3.0.4/fs/jffs2/erase.c linux-3.0.4/fs/jffs2/erase.c
40910 --- linux-3.0.4/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
40911 +++ linux-3.0.4/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
40912 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
40913 struct jffs2_unknown_node marker = {
40914 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
40915 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40916 - .totlen = cpu_to_je32(c->cleanmarker_size)
40917 + .totlen = cpu_to_je32(c->cleanmarker_size),
40918 + .hdr_crc = cpu_to_je32(0)
40919 };
40920
40921 jffs2_prealloc_raw_node_refs(c, jeb, 1);
40922 diff -urNp linux-3.0.4/fs/jffs2/wbuf.c linux-3.0.4/fs/jffs2/wbuf.c
40923 --- linux-3.0.4/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
40924 +++ linux-3.0.4/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
40925 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
40926 {
40927 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
40928 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40929 - .totlen = constant_cpu_to_je32(8)
40930 + .totlen = constant_cpu_to_je32(8),
40931 + .hdr_crc = constant_cpu_to_je32(0)
40932 };
40933
40934 /*
40935 diff -urNp linux-3.0.4/fs/jffs2/xattr.c linux-3.0.4/fs/jffs2/xattr.c
40936 --- linux-3.0.4/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
40937 +++ linux-3.0.4/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
40938 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
40939
40940 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
40941
40942 + pax_track_stack();
40943 +
40944 /* Phase.1 : Merge same xref */
40945 for (i=0; i < XREF_TMPHASH_SIZE; i++)
40946 xref_tmphash[i] = NULL;
40947 diff -urNp linux-3.0.4/fs/jfs/super.c linux-3.0.4/fs/jfs/super.c
40948 --- linux-3.0.4/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
40949 +++ linux-3.0.4/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
40950 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
40951
40952 jfs_inode_cachep =
40953 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
40954 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
40955 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
40956 init_once);
40957 if (jfs_inode_cachep == NULL)
40958 return -ENOMEM;
40959 diff -urNp linux-3.0.4/fs/Kconfig.binfmt linux-3.0.4/fs/Kconfig.binfmt
40960 --- linux-3.0.4/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
40961 +++ linux-3.0.4/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
40962 @@ -86,7 +86,7 @@ config HAVE_AOUT
40963
40964 config BINFMT_AOUT
40965 tristate "Kernel support for a.out and ECOFF binaries"
40966 - depends on HAVE_AOUT
40967 + depends on HAVE_AOUT && BROKEN
40968 ---help---
40969 A.out (Assembler.OUTput) is a set of formats for libraries and
40970 executables used in the earliest versions of UNIX. Linux used
40971 diff -urNp linux-3.0.4/fs/libfs.c linux-3.0.4/fs/libfs.c
40972 --- linux-3.0.4/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
40973 +++ linux-3.0.4/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
40974 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
40975
40976 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
40977 struct dentry *next;
40978 + char d_name[sizeof(next->d_iname)];
40979 + const unsigned char *name;
40980 +
40981 next = list_entry(p, struct dentry, d_u.d_child);
40982 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
40983 if (!simple_positive(next)) {
40984 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
40985
40986 spin_unlock(&next->d_lock);
40987 spin_unlock(&dentry->d_lock);
40988 - if (filldir(dirent, next->d_name.name,
40989 + name = next->d_name.name;
40990 + if (name == next->d_iname) {
40991 + memcpy(d_name, name, next->d_name.len);
40992 + name = d_name;
40993 + }
40994 + if (filldir(dirent, name,
40995 next->d_name.len, filp->f_pos,
40996 next->d_inode->i_ino,
40997 dt_type(next->d_inode)) < 0)
40998 diff -urNp linux-3.0.4/fs/lockd/clntproc.c linux-3.0.4/fs/lockd/clntproc.c
40999 --- linux-3.0.4/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
41000 +++ linux-3.0.4/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
41001 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
41002 /*
41003 * Cookie counter for NLM requests
41004 */
41005 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
41006 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
41007
41008 void nlmclnt_next_cookie(struct nlm_cookie *c)
41009 {
41010 - u32 cookie = atomic_inc_return(&nlm_cookie);
41011 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
41012
41013 memcpy(c->data, &cookie, 4);
41014 c->len=4;
41015 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
41016 struct nlm_rqst reqst, *req;
41017 int status;
41018
41019 + pax_track_stack();
41020 +
41021 req = &reqst;
41022 memset(req, 0, sizeof(*req));
41023 locks_init_lock(&req->a_args.lock.fl);
41024 diff -urNp linux-3.0.4/fs/locks.c linux-3.0.4/fs/locks.c
41025 --- linux-3.0.4/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
41026 +++ linux-3.0.4/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
41027 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
41028 return;
41029
41030 if (filp->f_op && filp->f_op->flock) {
41031 - struct file_lock fl = {
41032 + struct file_lock flock = {
41033 .fl_pid = current->tgid,
41034 .fl_file = filp,
41035 .fl_flags = FL_FLOCK,
41036 .fl_type = F_UNLCK,
41037 .fl_end = OFFSET_MAX,
41038 };
41039 - filp->f_op->flock(filp, F_SETLKW, &fl);
41040 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
41041 - fl.fl_ops->fl_release_private(&fl);
41042 + filp->f_op->flock(filp, F_SETLKW, &flock);
41043 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
41044 + flock.fl_ops->fl_release_private(&flock);
41045 }
41046
41047 lock_flocks();
41048 diff -urNp linux-3.0.4/fs/logfs/super.c linux-3.0.4/fs/logfs/super.c
41049 --- linux-3.0.4/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
41050 +++ linux-3.0.4/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
41051 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
41052 struct logfs_disk_super _ds1, *ds1 = &_ds1;
41053 int err, valid0, valid1;
41054
41055 + pax_track_stack();
41056 +
41057 /* read first superblock */
41058 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
41059 if (err)
41060 diff -urNp linux-3.0.4/fs/namei.c linux-3.0.4/fs/namei.c
41061 --- linux-3.0.4/fs/namei.c 2011-07-21 22:17:23.000000000 -0400
41062 +++ linux-3.0.4/fs/namei.c 2011-08-23 21:48:14.000000000 -0400
41063 @@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
41064 return ret;
41065
41066 /*
41067 - * Read/write DACs are always overridable.
41068 - * Executable DACs are overridable for all directories and
41069 - * for non-directories that have least one exec bit set.
41070 + * Searching includes executable on directories, else just read.
41071 */
41072 - if (!(mask & MAY_EXEC) || execute_ok(inode))
41073 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
41074 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41075 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
41076 +#ifdef CONFIG_GRKERNSEC
41077 + if (flags & IPERM_FLAG_RCU)
41078 + return -ECHILD;
41079 +#endif
41080 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
41081 return 0;
41082 + }
41083
41084 /*
41085 - * Searching includes executable on directories, else just read.
41086 + * Read/write DACs are always overridable.
41087 + * Executable DACs are overridable for all directories and
41088 + * for non-directories that have least one exec bit set.
41089 */
41090 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41091 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
41092 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
41093 + if (!(mask & MAY_EXEC) || execute_ok(inode)) {
41094 +#ifdef CONFIG_GRKERNSEC
41095 + if (flags & IPERM_FLAG_RCU)
41096 + return -ECHILD;
41097 +#endif
41098 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
41099 return 0;
41100 + }
41101
41102 return -EACCES;
41103 }
41104 @@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
41105 br_read_unlock(vfsmount_lock);
41106 }
41107
41108 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
41109 + return -ENOENT;
41110 +
41111 if (likely(!(nd->flags & LOOKUP_JUMPED)))
41112 return 0;
41113
41114 @@ -593,9 +606,16 @@ static inline int exec_permission(struct
41115 if (ret == -ECHILD)
41116 return ret;
41117
41118 - if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
41119 - ns_capable(ns, CAP_DAC_READ_SEARCH))
41120 + if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
41121 goto ok;
41122 + else {
41123 +#ifdef CONFIG_GRKERNSEC
41124 + if (flags & IPERM_FLAG_RCU)
41125 + return -ECHILD;
41126 +#endif
41127 + if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
41128 + goto ok;
41129 + }
41130
41131 return ret;
41132 ok:
41133 @@ -703,11 +723,19 @@ follow_link(struct path *link, struct na
41134 return error;
41135 }
41136
41137 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
41138 + dentry->d_inode, dentry, nd->path.mnt)) {
41139 + error = -EACCES;
41140 + *p = ERR_PTR(error); /* no ->put_link(), please */
41141 + path_put(&nd->path);
41142 + return error;
41143 + }
41144 +
41145 nd->last_type = LAST_BIND;
41146 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
41147 error = PTR_ERR(*p);
41148 if (!IS_ERR(*p)) {
41149 - char *s = nd_get_link(nd);
41150 + const char *s = nd_get_link(nd);
41151 error = 0;
41152 if (s)
41153 error = __vfs_follow_link(nd, s);
41154 @@ -1625,6 +1653,9 @@ static int do_path_lookup(int dfd, const
41155 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
41156
41157 if (likely(!retval)) {
41158 + if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
41159 + return -ENOENT;
41160 +
41161 if (unlikely(!audit_dummy_context())) {
41162 if (nd->path.dentry && nd->inode)
41163 audit_inode(name, nd->path.dentry);
41164 @@ -1935,6 +1966,30 @@ int vfs_create(struct inode *dir, struct
41165 return error;
41166 }
41167
41168 +/*
41169 + * Note that while the flag value (low two bits) for sys_open means:
41170 + * 00 - read-only
41171 + * 01 - write-only
41172 + * 10 - read-write
41173 + * 11 - special
41174 + * it is changed into
41175 + * 00 - no permissions needed
41176 + * 01 - read-permission
41177 + * 10 - write-permission
41178 + * 11 - read-write
41179 + * for the internal routines (ie open_namei()/follow_link() etc)
41180 + * This is more logical, and also allows the 00 "no perm needed"
41181 + * to be used for symlinks (where the permissions are checked
41182 + * later).
41183 + *
41184 +*/
41185 +static inline int open_to_namei_flags(int flag)
41186 +{
41187 + if ((flag+1) & O_ACCMODE)
41188 + flag++;
41189 + return flag;
41190 +}
41191 +
41192 static int may_open(struct path *path, int acc_mode, int flag)
41193 {
41194 struct dentry *dentry = path->dentry;
41195 @@ -1987,7 +2042,27 @@ static int may_open(struct path *path, i
41196 /*
41197 * Ensure there are no outstanding leases on the file.
41198 */
41199 - return break_lease(inode, flag);
41200 + error = break_lease(inode, flag);
41201 +
41202 + if (error)
41203 + return error;
41204 +
41205 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
41206 + error = -EPERM;
41207 + goto exit;
41208 + }
41209 +
41210 + if (gr_handle_rawio(inode)) {
41211 + error = -EPERM;
41212 + goto exit;
41213 + }
41214 +
41215 + if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
41216 + error = -EACCES;
41217 + goto exit;
41218 + }
41219 +exit:
41220 + return error;
41221 }
41222
41223 static int handle_truncate(struct file *filp)
41224 @@ -2013,30 +2088,6 @@ static int handle_truncate(struct file *
41225 }
41226
41227 /*
41228 - * Note that while the flag value (low two bits) for sys_open means:
41229 - * 00 - read-only
41230 - * 01 - write-only
41231 - * 10 - read-write
41232 - * 11 - special
41233 - * it is changed into
41234 - * 00 - no permissions needed
41235 - * 01 - read-permission
41236 - * 10 - write-permission
41237 - * 11 - read-write
41238 - * for the internal routines (ie open_namei()/follow_link() etc)
41239 - * This is more logical, and also allows the 00 "no perm needed"
41240 - * to be used for symlinks (where the permissions are checked
41241 - * later).
41242 - *
41243 -*/
41244 -static inline int open_to_namei_flags(int flag)
41245 -{
41246 - if ((flag+1) & O_ACCMODE)
41247 - flag++;
41248 - return flag;
41249 -}
41250 -
41251 -/*
41252 * Handle the last step of open()
41253 */
41254 static struct file *do_last(struct nameidata *nd, struct path *path,
41255 @@ -2045,6 +2096,7 @@ static struct file *do_last(struct namei
41256 struct dentry *dir = nd->path.dentry;
41257 struct dentry *dentry;
41258 int open_flag = op->open_flag;
41259 + int flag = open_to_namei_flags(open_flag);
41260 int will_truncate = open_flag & O_TRUNC;
41261 int want_write = 0;
41262 int acc_mode = op->acc_mode;
41263 @@ -2132,6 +2184,12 @@ static struct file *do_last(struct namei
41264 /* Negative dentry, just create the file */
41265 if (!dentry->d_inode) {
41266 int mode = op->mode;
41267 +
41268 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
41269 + error = -EACCES;
41270 + goto exit_mutex_unlock;
41271 + }
41272 +
41273 if (!IS_POSIXACL(dir->d_inode))
41274 mode &= ~current_umask();
41275 /*
41276 @@ -2155,6 +2213,8 @@ static struct file *do_last(struct namei
41277 error = vfs_create(dir->d_inode, dentry, mode, nd);
41278 if (error)
41279 goto exit_mutex_unlock;
41280 + else
41281 + gr_handle_create(path->dentry, path->mnt);
41282 mutex_unlock(&dir->d_inode->i_mutex);
41283 dput(nd->path.dentry);
41284 nd->path.dentry = dentry;
41285 @@ -2164,6 +2224,14 @@ static struct file *do_last(struct namei
41286 /*
41287 * It already exists.
41288 */
41289 +
41290 + /* only check if O_CREAT is specified, all other checks need to go
41291 + into may_open */
41292 + if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
41293 + error = -EACCES;
41294 + goto exit_mutex_unlock;
41295 + }
41296 +
41297 mutex_unlock(&dir->d_inode->i_mutex);
41298 audit_inode(pathname, path->dentry);
41299
41300 @@ -2450,6 +2518,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41301 error = may_mknod(mode);
41302 if (error)
41303 goto out_dput;
41304 +
41305 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
41306 + error = -EPERM;
41307 + goto out_dput;
41308 + }
41309 +
41310 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
41311 + error = -EACCES;
41312 + goto out_dput;
41313 + }
41314 +
41315 error = mnt_want_write(nd.path.mnt);
41316 if (error)
41317 goto out_dput;
41318 @@ -2470,6 +2549,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41319 }
41320 out_drop_write:
41321 mnt_drop_write(nd.path.mnt);
41322 +
41323 + if (!error)
41324 + gr_handle_create(dentry, nd.path.mnt);
41325 out_dput:
41326 dput(dentry);
41327 out_unlock:
41328 @@ -2522,6 +2604,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41329 if (IS_ERR(dentry))
41330 goto out_unlock;
41331
41332 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
41333 + error = -EACCES;
41334 + goto out_dput;
41335 + }
41336 +
41337 if (!IS_POSIXACL(nd.path.dentry->d_inode))
41338 mode &= ~current_umask();
41339 error = mnt_want_write(nd.path.mnt);
41340 @@ -2533,6 +2620,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41341 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
41342 out_drop_write:
41343 mnt_drop_write(nd.path.mnt);
41344 +
41345 + if (!error)
41346 + gr_handle_create(dentry, nd.path.mnt);
41347 +
41348 out_dput:
41349 dput(dentry);
41350 out_unlock:
41351 @@ -2613,6 +2704,8 @@ static long do_rmdir(int dfd, const char
41352 char * name;
41353 struct dentry *dentry;
41354 struct nameidata nd;
41355 + ino_t saved_ino = 0;
41356 + dev_t saved_dev = 0;
41357
41358 error = user_path_parent(dfd, pathname, &nd, &name);
41359 if (error)
41360 @@ -2641,6 +2734,17 @@ static long do_rmdir(int dfd, const char
41361 error = -ENOENT;
41362 goto exit3;
41363 }
41364 +
41365 + if (dentry->d_inode->i_nlink <= 1) {
41366 + saved_ino = dentry->d_inode->i_ino;
41367 + saved_dev = gr_get_dev_from_dentry(dentry);
41368 + }
41369 +
41370 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
41371 + error = -EACCES;
41372 + goto exit3;
41373 + }
41374 +
41375 error = mnt_want_write(nd.path.mnt);
41376 if (error)
41377 goto exit3;
41378 @@ -2648,6 +2752,8 @@ static long do_rmdir(int dfd, const char
41379 if (error)
41380 goto exit4;
41381 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
41382 + if (!error && (saved_dev || saved_ino))
41383 + gr_handle_delete(saved_ino, saved_dev);
41384 exit4:
41385 mnt_drop_write(nd.path.mnt);
41386 exit3:
41387 @@ -2710,6 +2816,8 @@ static long do_unlinkat(int dfd, const c
41388 struct dentry *dentry;
41389 struct nameidata nd;
41390 struct inode *inode = NULL;
41391 + ino_t saved_ino = 0;
41392 + dev_t saved_dev = 0;
41393
41394 error = user_path_parent(dfd, pathname, &nd, &name);
41395 if (error)
41396 @@ -2732,6 +2840,16 @@ static long do_unlinkat(int dfd, const c
41397 if (!inode)
41398 goto slashes;
41399 ihold(inode);
41400 +
41401 + if (inode->i_nlink <= 1) {
41402 + saved_ino = inode->i_ino;
41403 + saved_dev = gr_get_dev_from_dentry(dentry);
41404 + }
41405 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
41406 + error = -EACCES;
41407 + goto exit2;
41408 + }
41409 +
41410 error = mnt_want_write(nd.path.mnt);
41411 if (error)
41412 goto exit2;
41413 @@ -2739,6 +2857,8 @@ static long do_unlinkat(int dfd, const c
41414 if (error)
41415 goto exit3;
41416 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
41417 + if (!error && (saved_ino || saved_dev))
41418 + gr_handle_delete(saved_ino, saved_dev);
41419 exit3:
41420 mnt_drop_write(nd.path.mnt);
41421 exit2:
41422 @@ -2816,6 +2936,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
41423 if (IS_ERR(dentry))
41424 goto out_unlock;
41425
41426 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
41427 + error = -EACCES;
41428 + goto out_dput;
41429 + }
41430 +
41431 error = mnt_want_write(nd.path.mnt);
41432 if (error)
41433 goto out_dput;
41434 @@ -2823,6 +2948,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
41435 if (error)
41436 goto out_drop_write;
41437 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
41438 + if (!error)
41439 + gr_handle_create(dentry, nd.path.mnt);
41440 out_drop_write:
41441 mnt_drop_write(nd.path.mnt);
41442 out_dput:
41443 @@ -2931,6 +3058,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41444 error = PTR_ERR(new_dentry);
41445 if (IS_ERR(new_dentry))
41446 goto out_unlock;
41447 +
41448 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
41449 + old_path.dentry->d_inode,
41450 + old_path.dentry->d_inode->i_mode, to)) {
41451 + error = -EACCES;
41452 + goto out_dput;
41453 + }
41454 +
41455 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
41456 + old_path.dentry, old_path.mnt, to)) {
41457 + error = -EACCES;
41458 + goto out_dput;
41459 + }
41460 +
41461 error = mnt_want_write(nd.path.mnt);
41462 if (error)
41463 goto out_dput;
41464 @@ -2938,6 +3079,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41465 if (error)
41466 goto out_drop_write;
41467 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
41468 + if (!error)
41469 + gr_handle_create(new_dentry, nd.path.mnt);
41470 out_drop_write:
41471 mnt_drop_write(nd.path.mnt);
41472 out_dput:
41473 @@ -3113,6 +3256,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41474 char *to;
41475 int error;
41476
41477 + pax_track_stack();
41478 +
41479 error = user_path_parent(olddfd, oldname, &oldnd, &from);
41480 if (error)
41481 goto exit;
41482 @@ -3169,6 +3314,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41483 if (new_dentry == trap)
41484 goto exit5;
41485
41486 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
41487 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
41488 + to);
41489 + if (error)
41490 + goto exit5;
41491 +
41492 error = mnt_want_write(oldnd.path.mnt);
41493 if (error)
41494 goto exit5;
41495 @@ -3178,6 +3329,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41496 goto exit6;
41497 error = vfs_rename(old_dir->d_inode, old_dentry,
41498 new_dir->d_inode, new_dentry);
41499 + if (!error)
41500 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
41501 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
41502 exit6:
41503 mnt_drop_write(oldnd.path.mnt);
41504 exit5:
41505 @@ -3203,6 +3357,8 @@ SYSCALL_DEFINE2(rename, const char __use
41506
41507 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
41508 {
41509 + char tmpbuf[64];
41510 + const char *newlink;
41511 int len;
41512
41513 len = PTR_ERR(link);
41514 @@ -3212,7 +3368,14 @@ int vfs_readlink(struct dentry *dentry,
41515 len = strlen(link);
41516 if (len > (unsigned) buflen)
41517 len = buflen;
41518 - if (copy_to_user(buffer, link, len))
41519 +
41520 + if (len < sizeof(tmpbuf)) {
41521 + memcpy(tmpbuf, link, len);
41522 + newlink = tmpbuf;
41523 + } else
41524 + newlink = link;
41525 +
41526 + if (copy_to_user(buffer, newlink, len))
41527 len = -EFAULT;
41528 out:
41529 return len;
41530 diff -urNp linux-3.0.4/fs/namespace.c linux-3.0.4/fs/namespace.c
41531 --- linux-3.0.4/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
41532 +++ linux-3.0.4/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
41533 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
41534 if (!(sb->s_flags & MS_RDONLY))
41535 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
41536 up_write(&sb->s_umount);
41537 +
41538 + gr_log_remount(mnt->mnt_devname, retval);
41539 +
41540 return retval;
41541 }
41542
41543 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
41544 br_write_unlock(vfsmount_lock);
41545 up_write(&namespace_sem);
41546 release_mounts(&umount_list);
41547 +
41548 + gr_log_unmount(mnt->mnt_devname, retval);
41549 +
41550 return retval;
41551 }
41552
41553 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
41554 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
41555 MS_STRICTATIME);
41556
41557 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
41558 + retval = -EPERM;
41559 + goto dput_out;
41560 + }
41561 +
41562 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
41563 + retval = -EPERM;
41564 + goto dput_out;
41565 + }
41566 +
41567 if (flags & MS_REMOUNT)
41568 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
41569 data_page);
41570 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
41571 dev_name, data_page);
41572 dput_out:
41573 path_put(&path);
41574 +
41575 + gr_log_mount(dev_name, dir_name, retval);
41576 +
41577 return retval;
41578 }
41579
41580 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
41581 if (error)
41582 goto out2;
41583
41584 + if (gr_handle_chroot_pivot()) {
41585 + error = -EPERM;
41586 + goto out2;
41587 + }
41588 +
41589 get_fs_root(current->fs, &root);
41590 error = lock_mount(&old);
41591 if (error)
41592 diff -urNp linux-3.0.4/fs/ncpfs/dir.c linux-3.0.4/fs/ncpfs/dir.c
41593 --- linux-3.0.4/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
41594 +++ linux-3.0.4/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
41595 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
41596 int res, val = 0, len;
41597 __u8 __name[NCP_MAXPATHLEN + 1];
41598
41599 + pax_track_stack();
41600 +
41601 if (dentry == dentry->d_sb->s_root)
41602 return 1;
41603
41604 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
41605 int error, res, len;
41606 __u8 __name[NCP_MAXPATHLEN + 1];
41607
41608 + pax_track_stack();
41609 +
41610 error = -EIO;
41611 if (!ncp_conn_valid(server))
41612 goto finished;
41613 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
41614 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
41615 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
41616
41617 + pax_track_stack();
41618 +
41619 ncp_age_dentry(server, dentry);
41620 len = sizeof(__name);
41621 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
41622 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
41623 int error, len;
41624 __u8 __name[NCP_MAXPATHLEN + 1];
41625
41626 + pax_track_stack();
41627 +
41628 DPRINTK("ncp_mkdir: making %s/%s\n",
41629 dentry->d_parent->d_name.name, dentry->d_name.name);
41630
41631 @@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
41632 int old_len, new_len;
41633 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
41634
41635 + pax_track_stack();
41636 +
41637 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
41638 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
41639 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
41640 diff -urNp linux-3.0.4/fs/ncpfs/inode.c linux-3.0.4/fs/ncpfs/inode.c
41641 --- linux-3.0.4/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
41642 +++ linux-3.0.4/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
41643 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
41644 #endif
41645 struct ncp_entry_info finfo;
41646
41647 + pax_track_stack();
41648 +
41649 memset(&data, 0, sizeof(data));
41650 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
41651 if (!server)
41652 diff -urNp linux-3.0.4/fs/nfs/inode.c linux-3.0.4/fs/nfs/inode.c
41653 --- linux-3.0.4/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
41654 +++ linux-3.0.4/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
41655 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
41656 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
41657 nfsi->attrtimeo_timestamp = jiffies;
41658
41659 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
41660 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
41661 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
41662 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
41663 else
41664 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
41665 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
41666 }
41667
41668 -static atomic_long_t nfs_attr_generation_counter;
41669 +static atomic_long_unchecked_t nfs_attr_generation_counter;
41670
41671 static unsigned long nfs_read_attr_generation_counter(void)
41672 {
41673 - return atomic_long_read(&nfs_attr_generation_counter);
41674 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
41675 }
41676
41677 unsigned long nfs_inc_attr_generation_counter(void)
41678 {
41679 - return atomic_long_inc_return(&nfs_attr_generation_counter);
41680 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
41681 }
41682
41683 void nfs_fattr_init(struct nfs_fattr *fattr)
41684 diff -urNp linux-3.0.4/fs/nfsd/nfs4state.c linux-3.0.4/fs/nfsd/nfs4state.c
41685 --- linux-3.0.4/fs/nfsd/nfs4state.c 2011-08-23 21:44:40.000000000 -0400
41686 +++ linux-3.0.4/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
41687 @@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
41688 unsigned int strhashval;
41689 int err;
41690
41691 + pax_track_stack();
41692 +
41693 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41694 (long long) lock->lk_offset,
41695 (long long) lock->lk_length);
41696 diff -urNp linux-3.0.4/fs/nfsd/nfs4xdr.c linux-3.0.4/fs/nfsd/nfs4xdr.c
41697 --- linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
41698 +++ linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
41699 @@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41700 .dentry = dentry,
41701 };
41702
41703 + pax_track_stack();
41704 +
41705 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41706 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41707 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41708 diff -urNp linux-3.0.4/fs/nfsd/vfs.c linux-3.0.4/fs/nfsd/vfs.c
41709 --- linux-3.0.4/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
41710 +++ linux-3.0.4/fs/nfsd/vfs.c 2011-08-23 21:47:56.000000000 -0400
41711 @@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41712 } else {
41713 oldfs = get_fs();
41714 set_fs(KERNEL_DS);
41715 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41716 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41717 set_fs(oldfs);
41718 }
41719
41720 @@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41721
41722 /* Write the data. */
41723 oldfs = get_fs(); set_fs(KERNEL_DS);
41724 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41725 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41726 set_fs(oldfs);
41727 if (host_err < 0)
41728 goto out_nfserr;
41729 @@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41730 */
41731
41732 oldfs = get_fs(); set_fs(KERNEL_DS);
41733 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
41734 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41735 set_fs(oldfs);
41736
41737 if (host_err < 0)
41738 diff -urNp linux-3.0.4/fs/notify/fanotify/fanotify_user.c linux-3.0.4/fs/notify/fanotify/fanotify_user.c
41739 --- linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
41740 +++ linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
41741 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
41742 goto out_close_fd;
41743
41744 ret = -EFAULT;
41745 - if (copy_to_user(buf, &fanotify_event_metadata,
41746 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
41747 + copy_to_user(buf, &fanotify_event_metadata,
41748 fanotify_event_metadata.event_len))
41749 goto out_kill_access_response;
41750
41751 diff -urNp linux-3.0.4/fs/notify/notification.c linux-3.0.4/fs/notify/notification.c
41752 --- linux-3.0.4/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
41753 +++ linux-3.0.4/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
41754 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41755 * get set to 0 so it will never get 'freed'
41756 */
41757 static struct fsnotify_event *q_overflow_event;
41758 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41759 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41760
41761 /**
41762 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41763 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41764 */
41765 u32 fsnotify_get_cookie(void)
41766 {
41767 - return atomic_inc_return(&fsnotify_sync_cookie);
41768 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41769 }
41770 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
41771
41772 diff -urNp linux-3.0.4/fs/ntfs/dir.c linux-3.0.4/fs/ntfs/dir.c
41773 --- linux-3.0.4/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
41774 +++ linux-3.0.4/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
41775 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
41776 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
41777 ~(s64)(ndir->itype.index.block_size - 1)));
41778 /* Bounds checks. */
41779 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41780 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41781 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
41782 "inode 0x%lx or driver bug.", vdir->i_ino);
41783 goto err_out;
41784 diff -urNp linux-3.0.4/fs/ntfs/file.c linux-3.0.4/fs/ntfs/file.c
41785 --- linux-3.0.4/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
41786 +++ linux-3.0.4/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
41787 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
41788 #endif /* NTFS_RW */
41789 };
41790
41791 -const struct file_operations ntfs_empty_file_ops = {};
41792 +const struct file_operations ntfs_empty_file_ops __read_only;
41793
41794 -const struct inode_operations ntfs_empty_inode_ops = {};
41795 +const struct inode_operations ntfs_empty_inode_ops __read_only;
41796 diff -urNp linux-3.0.4/fs/ocfs2/localalloc.c linux-3.0.4/fs/ocfs2/localalloc.c
41797 --- linux-3.0.4/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
41798 +++ linux-3.0.4/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
41799 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
41800 goto bail;
41801 }
41802
41803 - atomic_inc(&osb->alloc_stats.moves);
41804 + atomic_inc_unchecked(&osb->alloc_stats.moves);
41805
41806 bail:
41807 if (handle)
41808 diff -urNp linux-3.0.4/fs/ocfs2/namei.c linux-3.0.4/fs/ocfs2/namei.c
41809 --- linux-3.0.4/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
41810 +++ linux-3.0.4/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
41811 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
41812 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
41813 struct ocfs2_dir_lookup_result target_insert = { NULL, };
41814
41815 + pax_track_stack();
41816 +
41817 /* At some point it might be nice to break this function up a
41818 * bit. */
41819
41820 diff -urNp linux-3.0.4/fs/ocfs2/ocfs2.h linux-3.0.4/fs/ocfs2/ocfs2.h
41821 --- linux-3.0.4/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
41822 +++ linux-3.0.4/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
41823 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
41824
41825 struct ocfs2_alloc_stats
41826 {
41827 - atomic_t moves;
41828 - atomic_t local_data;
41829 - atomic_t bitmap_data;
41830 - atomic_t bg_allocs;
41831 - atomic_t bg_extends;
41832 + atomic_unchecked_t moves;
41833 + atomic_unchecked_t local_data;
41834 + atomic_unchecked_t bitmap_data;
41835 + atomic_unchecked_t bg_allocs;
41836 + atomic_unchecked_t bg_extends;
41837 };
41838
41839 enum ocfs2_local_alloc_state
41840 diff -urNp linux-3.0.4/fs/ocfs2/suballoc.c linux-3.0.4/fs/ocfs2/suballoc.c
41841 --- linux-3.0.4/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
41842 +++ linux-3.0.4/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
41843 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
41844 mlog_errno(status);
41845 goto bail;
41846 }
41847 - atomic_inc(&osb->alloc_stats.bg_extends);
41848 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
41849
41850 /* You should never ask for this much metadata */
41851 BUG_ON(bits_wanted >
41852 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
41853 mlog_errno(status);
41854 goto bail;
41855 }
41856 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41857 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41858
41859 *suballoc_loc = res.sr_bg_blkno;
41860 *suballoc_bit_start = res.sr_bit_offset;
41861 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
41862 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
41863 res->sr_bits);
41864
41865 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41866 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41867
41868 BUG_ON(res->sr_bits != 1);
41869
41870 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
41871 mlog_errno(status);
41872 goto bail;
41873 }
41874 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41875 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41876
41877 BUG_ON(res.sr_bits != 1);
41878
41879 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
41880 cluster_start,
41881 num_clusters);
41882 if (!status)
41883 - atomic_inc(&osb->alloc_stats.local_data);
41884 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
41885 } else {
41886 if (min_clusters > (osb->bitmap_cpg - 1)) {
41887 /* The only paths asking for contiguousness
41888 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
41889 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
41890 res.sr_bg_blkno,
41891 res.sr_bit_offset);
41892 - atomic_inc(&osb->alloc_stats.bitmap_data);
41893 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
41894 *num_clusters = res.sr_bits;
41895 }
41896 }
41897 diff -urNp linux-3.0.4/fs/ocfs2/super.c linux-3.0.4/fs/ocfs2/super.c
41898 --- linux-3.0.4/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
41899 +++ linux-3.0.4/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
41900 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
41901 "%10s => GlobalAllocs: %d LocalAllocs: %d "
41902 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
41903 "Stats",
41904 - atomic_read(&osb->alloc_stats.bitmap_data),
41905 - atomic_read(&osb->alloc_stats.local_data),
41906 - atomic_read(&osb->alloc_stats.bg_allocs),
41907 - atomic_read(&osb->alloc_stats.moves),
41908 - atomic_read(&osb->alloc_stats.bg_extends));
41909 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
41910 + atomic_read_unchecked(&osb->alloc_stats.local_data),
41911 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
41912 + atomic_read_unchecked(&osb->alloc_stats.moves),
41913 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
41914
41915 out += snprintf(buf + out, len - out,
41916 "%10s => State: %u Descriptor: %llu Size: %u bits "
41917 @@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
41918 spin_lock_init(&osb->osb_xattr_lock);
41919 ocfs2_init_steal_slots(osb);
41920
41921 - atomic_set(&osb->alloc_stats.moves, 0);
41922 - atomic_set(&osb->alloc_stats.local_data, 0);
41923 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
41924 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
41925 - atomic_set(&osb->alloc_stats.bg_extends, 0);
41926 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
41927 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
41928 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
41929 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
41930 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
41931
41932 /* Copy the blockcheck stats from the superblock probe */
41933 osb->osb_ecc_stats = *stats;
41934 diff -urNp linux-3.0.4/fs/ocfs2/symlink.c linux-3.0.4/fs/ocfs2/symlink.c
41935 --- linux-3.0.4/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
41936 +++ linux-3.0.4/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
41937 @@ -142,7 +142,7 @@ bail:
41938
41939 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41940 {
41941 - char *link = nd_get_link(nd);
41942 + const char *link = nd_get_link(nd);
41943 if (!IS_ERR(link))
41944 kfree(link);
41945 }
41946 diff -urNp linux-3.0.4/fs/open.c linux-3.0.4/fs/open.c
41947 --- linux-3.0.4/fs/open.c 2011-07-21 22:17:23.000000000 -0400
41948 +++ linux-3.0.4/fs/open.c 2011-08-23 21:48:14.000000000 -0400
41949 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
41950 error = locks_verify_truncate(inode, NULL, length);
41951 if (!error)
41952 error = security_path_truncate(&path);
41953 +
41954 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
41955 + error = -EACCES;
41956 +
41957 if (!error)
41958 error = do_truncate(path.dentry, length, 0, NULL);
41959
41960 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
41961 if (__mnt_is_readonly(path.mnt))
41962 res = -EROFS;
41963
41964 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
41965 + res = -EACCES;
41966 +
41967 out_path_release:
41968 path_put(&path);
41969 out:
41970 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
41971 if (error)
41972 goto dput_and_out;
41973
41974 + gr_log_chdir(path.dentry, path.mnt);
41975 +
41976 set_fs_pwd(current->fs, &path);
41977
41978 dput_and_out:
41979 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
41980 goto out_putf;
41981
41982 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
41983 +
41984 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
41985 + error = -EPERM;
41986 +
41987 + if (!error)
41988 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
41989 +
41990 if (!error)
41991 set_fs_pwd(current->fs, &file->f_path);
41992 out_putf:
41993 @@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
41994 if (error)
41995 goto dput_and_out;
41996
41997 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
41998 + goto dput_and_out;
41999 +
42000 + if (gr_handle_chroot_caps(&path)) {
42001 + error = -ENOMEM;
42002 + goto dput_and_out;
42003 + }
42004 +
42005 set_fs_root(current->fs, &path);
42006 +
42007 + gr_handle_chroot_chdir(&path);
42008 +
42009 error = 0;
42010 dput_and_out:
42011 path_put(&path);
42012 @@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
42013 err = mnt_want_write_file(file);
42014 if (err)
42015 goto out_putf;
42016 +
42017 mutex_lock(&inode->i_mutex);
42018 +
42019 + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
42020 + err = -EACCES;
42021 + goto out_unlock;
42022 + }
42023 +
42024 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
42025 if (err)
42026 goto out_unlock;
42027 if (mode == (mode_t) -1)
42028 mode = inode->i_mode;
42029 +
42030 + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
42031 + err = -EACCES;
42032 + goto out_unlock;
42033 + }
42034 +
42035 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42036 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42037 err = notify_change(dentry, &newattrs);
42038 @@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
42039 error = mnt_want_write(path.mnt);
42040 if (error)
42041 goto dput_and_out;
42042 +
42043 mutex_lock(&inode->i_mutex);
42044 +
42045 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
42046 + error = -EACCES;
42047 + goto out_unlock;
42048 + }
42049 +
42050 error = security_path_chmod(path.dentry, path.mnt, mode);
42051 if (error)
42052 goto out_unlock;
42053 if (mode == (mode_t) -1)
42054 mode = inode->i_mode;
42055 +
42056 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
42057 + error = -EACCES;
42058 + goto out_unlock;
42059 + }
42060 +
42061 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42062 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42063 error = notify_change(path.dentry, &newattrs);
42064 @@ -528,6 +581,9 @@ static int chown_common(struct path *pat
42065 int error;
42066 struct iattr newattrs;
42067
42068 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
42069 + return -EACCES;
42070 +
42071 newattrs.ia_valid = ATTR_CTIME;
42072 if (user != (uid_t) -1) {
42073 newattrs.ia_valid |= ATTR_UID;
42074 @@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
42075 if (!IS_ERR(tmp)) {
42076 fd = get_unused_fd_flags(flags);
42077 if (fd >= 0) {
42078 - struct file *f = do_filp_open(dfd, tmp, &op, lookup);
42079 + struct file *f;
42080 + /* don't allow to be set by userland */
42081 + flags &= ~FMODE_GREXEC;
42082 + f = do_filp_open(dfd, tmp, &op, lookup);
42083 if (IS_ERR(f)) {
42084 put_unused_fd(fd);
42085 fd = PTR_ERR(f);
42086 diff -urNp linux-3.0.4/fs/partitions/ldm.c linux-3.0.4/fs/partitions/ldm.c
42087 --- linux-3.0.4/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
42088 +++ linux-3.0.4/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
42089 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
42090 ldm_error ("A VBLK claims to have %d parts.", num);
42091 return false;
42092 }
42093 +
42094 if (rec >= num) {
42095 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
42096 return false;
42097 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
42098 goto found;
42099 }
42100
42101 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
42102 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
42103 if (!f) {
42104 ldm_crit ("Out of memory.");
42105 return false;
42106 diff -urNp linux-3.0.4/fs/pipe.c linux-3.0.4/fs/pipe.c
42107 --- linux-3.0.4/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
42108 +++ linux-3.0.4/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
42109 @@ -420,9 +420,9 @@ redo:
42110 }
42111 if (bufs) /* More to do? */
42112 continue;
42113 - if (!pipe->writers)
42114 + if (!atomic_read(&pipe->writers))
42115 break;
42116 - if (!pipe->waiting_writers) {
42117 + if (!atomic_read(&pipe->waiting_writers)) {
42118 /* syscall merging: Usually we must not sleep
42119 * if O_NONBLOCK is set, or if we got some data.
42120 * But if a writer sleeps in kernel space, then
42121 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
42122 mutex_lock(&inode->i_mutex);
42123 pipe = inode->i_pipe;
42124
42125 - if (!pipe->readers) {
42126 + if (!atomic_read(&pipe->readers)) {
42127 send_sig(SIGPIPE, current, 0);
42128 ret = -EPIPE;
42129 goto out;
42130 @@ -530,7 +530,7 @@ redo1:
42131 for (;;) {
42132 int bufs;
42133
42134 - if (!pipe->readers) {
42135 + if (!atomic_read(&pipe->readers)) {
42136 send_sig(SIGPIPE, current, 0);
42137 if (!ret)
42138 ret = -EPIPE;
42139 @@ -616,9 +616,9 @@ redo2:
42140 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42141 do_wakeup = 0;
42142 }
42143 - pipe->waiting_writers++;
42144 + atomic_inc(&pipe->waiting_writers);
42145 pipe_wait(pipe);
42146 - pipe->waiting_writers--;
42147 + atomic_dec(&pipe->waiting_writers);
42148 }
42149 out:
42150 mutex_unlock(&inode->i_mutex);
42151 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
42152 mask = 0;
42153 if (filp->f_mode & FMODE_READ) {
42154 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
42155 - if (!pipe->writers && filp->f_version != pipe->w_counter)
42156 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
42157 mask |= POLLHUP;
42158 }
42159
42160 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
42161 * Most Unices do not set POLLERR for FIFOs but on Linux they
42162 * behave exactly like pipes for poll().
42163 */
42164 - if (!pipe->readers)
42165 + if (!atomic_read(&pipe->readers))
42166 mask |= POLLERR;
42167 }
42168
42169 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
42170
42171 mutex_lock(&inode->i_mutex);
42172 pipe = inode->i_pipe;
42173 - pipe->readers -= decr;
42174 - pipe->writers -= decw;
42175 + atomic_sub(decr, &pipe->readers);
42176 + atomic_sub(decw, &pipe->writers);
42177
42178 - if (!pipe->readers && !pipe->writers) {
42179 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
42180 free_pipe_info(inode);
42181 } else {
42182 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
42183 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
42184
42185 if (inode->i_pipe) {
42186 ret = 0;
42187 - inode->i_pipe->readers++;
42188 + atomic_inc(&inode->i_pipe->readers);
42189 }
42190
42191 mutex_unlock(&inode->i_mutex);
42192 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
42193
42194 if (inode->i_pipe) {
42195 ret = 0;
42196 - inode->i_pipe->writers++;
42197 + atomic_inc(&inode->i_pipe->writers);
42198 }
42199
42200 mutex_unlock(&inode->i_mutex);
42201 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
42202 if (inode->i_pipe) {
42203 ret = 0;
42204 if (filp->f_mode & FMODE_READ)
42205 - inode->i_pipe->readers++;
42206 + atomic_inc(&inode->i_pipe->readers);
42207 if (filp->f_mode & FMODE_WRITE)
42208 - inode->i_pipe->writers++;
42209 + atomic_inc(&inode->i_pipe->writers);
42210 }
42211
42212 mutex_unlock(&inode->i_mutex);
42213 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
42214 inode->i_pipe = NULL;
42215 }
42216
42217 -static struct vfsmount *pipe_mnt __read_mostly;
42218 +struct vfsmount *pipe_mnt __read_mostly;
42219
42220 /*
42221 * pipefs_dname() is called from d_path().
42222 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
42223 goto fail_iput;
42224 inode->i_pipe = pipe;
42225
42226 - pipe->readers = pipe->writers = 1;
42227 + atomic_set(&pipe->readers, 1);
42228 + atomic_set(&pipe->writers, 1);
42229 inode->i_fop = &rdwr_pipefifo_fops;
42230
42231 /*
42232 diff -urNp linux-3.0.4/fs/proc/array.c linux-3.0.4/fs/proc/array.c
42233 --- linux-3.0.4/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
42234 +++ linux-3.0.4/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
42235 @@ -60,6 +60,7 @@
42236 #include <linux/tty.h>
42237 #include <linux/string.h>
42238 #include <linux/mman.h>
42239 +#include <linux/grsecurity.h>
42240 #include <linux/proc_fs.h>
42241 #include <linux/ioport.h>
42242 #include <linux/uaccess.h>
42243 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
42244 seq_putc(m, '\n');
42245 }
42246
42247 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42248 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
42249 +{
42250 + if (p->mm)
42251 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
42252 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
42253 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
42254 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
42255 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
42256 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
42257 + else
42258 + seq_printf(m, "PaX:\t-----\n");
42259 +}
42260 +#endif
42261 +
42262 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
42263 struct pid *pid, struct task_struct *task)
42264 {
42265 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
42266 task_cpus_allowed(m, task);
42267 cpuset_task_status_allowed(m, task);
42268 task_context_switch_counts(m, task);
42269 +
42270 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42271 + task_pax(m, task);
42272 +#endif
42273 +
42274 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
42275 + task_grsec_rbac(m, task);
42276 +#endif
42277 +
42278 return 0;
42279 }
42280
42281 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42282 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42283 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42284 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42285 +#endif
42286 +
42287 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
42288 struct pid *pid, struct task_struct *task, int whole)
42289 {
42290 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
42291 cputime_t cutime, cstime, utime, stime;
42292 cputime_t cgtime, gtime;
42293 unsigned long rsslim = 0;
42294 - char tcomm[sizeof(task->comm)];
42295 + char tcomm[sizeof(task->comm)] = { 0 };
42296 unsigned long flags;
42297
42298 + pax_track_stack();
42299 +
42300 state = *get_task_state(task);
42301 vsize = eip = esp = 0;
42302 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
42303 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
42304 gtime = task->gtime;
42305 }
42306
42307 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42308 + if (PAX_RAND_FLAGS(mm)) {
42309 + eip = 0;
42310 + esp = 0;
42311 + wchan = 0;
42312 + }
42313 +#endif
42314 +#ifdef CONFIG_GRKERNSEC_HIDESYM
42315 + wchan = 0;
42316 + eip =0;
42317 + esp =0;
42318 +#endif
42319 +
42320 /* scale priority and nice values from timeslices to -20..20 */
42321 /* to make it look like a "normal" Unix priority/nice value */
42322 priority = task_prio(task);
42323 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
42324 vsize,
42325 mm ? get_mm_rss(mm) : 0,
42326 rsslim,
42327 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42328 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
42329 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
42330 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
42331 +#else
42332 mm ? (permitted ? mm->start_code : 1) : 0,
42333 mm ? (permitted ? mm->end_code : 1) : 0,
42334 (permitted && mm) ? mm->start_stack : 0,
42335 +#endif
42336 esp,
42337 eip,
42338 /* The signal information here is obsolete.
42339 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
42340
42341 return 0;
42342 }
42343 +
42344 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42345 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
42346 +{
42347 + u32 curr_ip = 0;
42348 + unsigned long flags;
42349 +
42350 + if (lock_task_sighand(task, &flags)) {
42351 + curr_ip = task->signal->curr_ip;
42352 + unlock_task_sighand(task, &flags);
42353 + }
42354 +
42355 + return sprintf(buffer, "%pI4\n", &curr_ip);
42356 +}
42357 +#endif
42358 diff -urNp linux-3.0.4/fs/proc/base.c linux-3.0.4/fs/proc/base.c
42359 --- linux-3.0.4/fs/proc/base.c 2011-08-23 21:44:40.000000000 -0400
42360 +++ linux-3.0.4/fs/proc/base.c 2011-08-23 21:48:14.000000000 -0400
42361 @@ -107,6 +107,22 @@ struct pid_entry {
42362 union proc_op op;
42363 };
42364
42365 +struct getdents_callback {
42366 + struct linux_dirent __user * current_dir;
42367 + struct linux_dirent __user * previous;
42368 + struct file * file;
42369 + int count;
42370 + int error;
42371 +};
42372 +
42373 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
42374 + loff_t offset, u64 ino, unsigned int d_type)
42375 +{
42376 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
42377 + buf->error = -EINVAL;
42378 + return 0;
42379 +}
42380 +
42381 #define NOD(NAME, MODE, IOP, FOP, OP) { \
42382 .name = (NAME), \
42383 .len = sizeof(NAME) - 1, \
42384 @@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
42385 if (task == current)
42386 return mm;
42387
42388 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
42389 + return ERR_PTR(-EPERM);
42390 +
42391 /*
42392 * If current is actively ptrace'ing, and would also be
42393 * permitted to freshly attach with ptrace now, permit it.
42394 @@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
42395 if (!mm->arg_end)
42396 goto out_mm; /* Shh! No looking before we're done */
42397
42398 + if (gr_acl_handle_procpidmem(task))
42399 + goto out_mm;
42400 +
42401 len = mm->arg_end - mm->arg_start;
42402
42403 if (len > PAGE_SIZE)
42404 @@ -309,12 +331,28 @@ out:
42405 return res;
42406 }
42407
42408 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42409 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42410 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42411 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42412 +#endif
42413 +
42414 static int proc_pid_auxv(struct task_struct *task, char *buffer)
42415 {
42416 struct mm_struct *mm = mm_for_maps(task);
42417 int res = PTR_ERR(mm);
42418 if (mm && !IS_ERR(mm)) {
42419 unsigned int nwords = 0;
42420 +
42421 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42422 + /* allow if we're currently ptracing this task */
42423 + if (PAX_RAND_FLAGS(mm) &&
42424 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
42425 + mmput(mm);
42426 + return res;
42427 + }
42428 +#endif
42429 +
42430 do {
42431 nwords += 2;
42432 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
42433 @@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
42434 }
42435
42436
42437 -#ifdef CONFIG_KALLSYMS
42438 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42439 /*
42440 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
42441 * Returns the resolved symbol. If that fails, simply return the address.
42442 @@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
42443 mutex_unlock(&task->signal->cred_guard_mutex);
42444 }
42445
42446 -#ifdef CONFIG_STACKTRACE
42447 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42448
42449 #define MAX_STACK_TRACE_DEPTH 64
42450
42451 @@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
42452 return count;
42453 }
42454
42455 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42456 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42457 static int proc_pid_syscall(struct task_struct *task, char *buffer)
42458 {
42459 long nr;
42460 @@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
42461 /************************************************************************/
42462
42463 /* permission checks */
42464 -static int proc_fd_access_allowed(struct inode *inode)
42465 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
42466 {
42467 struct task_struct *task;
42468 int allowed = 0;
42469 @@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
42470 */
42471 task = get_proc_task(inode);
42472 if (task) {
42473 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42474 + if (log)
42475 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
42476 + else
42477 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42478 put_task_struct(task);
42479 }
42480 return allowed;
42481 @@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
42482 if (!task)
42483 goto out_no_task;
42484
42485 + if (gr_acl_handle_procpidmem(task))
42486 + goto out;
42487 +
42488 ret = -ENOMEM;
42489 page = (char *)__get_free_page(GFP_TEMPORARY);
42490 if (!page)
42491 @@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
42492 path_put(&nd->path);
42493
42494 /* Are we allowed to snoop on the tasks file descriptors? */
42495 - if (!proc_fd_access_allowed(inode))
42496 + if (!proc_fd_access_allowed(inode,0))
42497 goto out;
42498
42499 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
42500 @@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
42501 struct path path;
42502
42503 /* Are we allowed to snoop on the tasks file descriptors? */
42504 - if (!proc_fd_access_allowed(inode))
42505 - goto out;
42506 + /* logging this is needed for learning on chromium to work properly,
42507 + but we don't want to flood the logs from 'ps' which does a readlink
42508 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
42509 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
42510 + */
42511 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
42512 + if (!proc_fd_access_allowed(inode,0))
42513 + goto out;
42514 + } else {
42515 + if (!proc_fd_access_allowed(inode,1))
42516 + goto out;
42517 + }
42518
42519 error = PROC_I(inode)->op.proc_get_link(inode, &path);
42520 if (error)
42521 @@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
42522 rcu_read_lock();
42523 cred = __task_cred(task);
42524 inode->i_uid = cred->euid;
42525 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42526 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42527 +#else
42528 inode->i_gid = cred->egid;
42529 +#endif
42530 rcu_read_unlock();
42531 }
42532 security_task_to_inode(task, inode);
42533 @@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
42534 struct inode *inode = dentry->d_inode;
42535 struct task_struct *task;
42536 const struct cred *cred;
42537 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42538 + const struct cred *tmpcred = current_cred();
42539 +#endif
42540
42541 generic_fillattr(inode, stat);
42542
42543 @@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
42544 stat->uid = 0;
42545 stat->gid = 0;
42546 task = pid_task(proc_pid(inode), PIDTYPE_PID);
42547 +
42548 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
42549 + rcu_read_unlock();
42550 + return -ENOENT;
42551 + }
42552 +
42553 if (task) {
42554 + cred = __task_cred(task);
42555 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42556 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
42557 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42558 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42559 +#endif
42560 + ) {
42561 +#endif
42562 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42563 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42564 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42565 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42566 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42567 +#endif
42568 task_dumpable(task)) {
42569 - cred = __task_cred(task);
42570 stat->uid = cred->euid;
42571 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42572 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
42573 +#else
42574 stat->gid = cred->egid;
42575 +#endif
42576 }
42577 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42578 + } else {
42579 + rcu_read_unlock();
42580 + return -ENOENT;
42581 + }
42582 +#endif
42583 }
42584 rcu_read_unlock();
42585 return 0;
42586 @@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
42587
42588 if (task) {
42589 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42590 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42591 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42592 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42593 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42594 +#endif
42595 task_dumpable(task)) {
42596 rcu_read_lock();
42597 cred = __task_cred(task);
42598 inode->i_uid = cred->euid;
42599 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42600 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42601 +#else
42602 inode->i_gid = cred->egid;
42603 +#endif
42604 rcu_read_unlock();
42605 } else {
42606 inode->i_uid = 0;
42607 @@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
42608 int fd = proc_fd(inode);
42609
42610 if (task) {
42611 - files = get_files_struct(task);
42612 + if (!gr_acl_handle_procpidmem(task))
42613 + files = get_files_struct(task);
42614 put_task_struct(task);
42615 }
42616 if (files) {
42617 @@ -2169,11 +2268,21 @@ static const struct file_operations proc
42618 */
42619 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
42620 {
42621 + struct task_struct *task;
42622 int rv = generic_permission(inode, mask, flags, NULL);
42623 - if (rv == 0)
42624 - return 0;
42625 +
42626 if (task_pid(current) == proc_pid(inode))
42627 rv = 0;
42628 +
42629 + task = get_proc_task(inode);
42630 + if (task == NULL)
42631 + return rv;
42632 +
42633 + if (gr_acl_handle_procpidmem(task))
42634 + rv = -EACCES;
42635 +
42636 + put_task_struct(task);
42637 +
42638 return rv;
42639 }
42640
42641 @@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
42642 if (!task)
42643 goto out_no_task;
42644
42645 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42646 + goto out;
42647 +
42648 /*
42649 * Yes, it does not scale. And it should not. Don't add
42650 * new entries into /proc/<tgid>/ without very good reasons.
42651 @@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
42652 if (!task)
42653 goto out_no_task;
42654
42655 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42656 + goto out;
42657 +
42658 ret = 0;
42659 i = filp->f_pos;
42660 switch (i) {
42661 @@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
42662 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42663 void *cookie)
42664 {
42665 - char *s = nd_get_link(nd);
42666 + const char *s = nd_get_link(nd);
42667 if (!IS_ERR(s))
42668 __putname(s);
42669 }
42670 @@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
42671 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
42672 #endif
42673 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
42674 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42675 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42676 INF("syscall", S_IRUGO, proc_pid_syscall),
42677 #endif
42678 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42679 @@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
42680 #ifdef CONFIG_SECURITY
42681 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42682 #endif
42683 -#ifdef CONFIG_KALLSYMS
42684 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42685 INF("wchan", S_IRUGO, proc_pid_wchan),
42686 #endif
42687 -#ifdef CONFIG_STACKTRACE
42688 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42689 ONE("stack", S_IRUGO, proc_pid_stack),
42690 #endif
42691 #ifdef CONFIG_SCHEDSTATS
42692 @@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
42693 #ifdef CONFIG_HARDWALL
42694 INF("hardwall", S_IRUGO, proc_pid_hardwall),
42695 #endif
42696 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42697 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
42698 +#endif
42699 };
42700
42701 static int proc_tgid_base_readdir(struct file * filp,
42702 @@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
42703 if (!inode)
42704 goto out;
42705
42706 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42707 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42708 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42709 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42710 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42711 +#else
42712 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42713 +#endif
42714 inode->i_op = &proc_tgid_base_inode_operations;
42715 inode->i_fop = &proc_tgid_base_operations;
42716 inode->i_flags|=S_IMMUTABLE;
42717 @@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
42718 if (!task)
42719 goto out;
42720
42721 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42722 + goto out_put_task;
42723 +
42724 result = proc_pid_instantiate(dir, dentry, task, NULL);
42725 +out_put_task:
42726 put_task_struct(task);
42727 out:
42728 return result;
42729 @@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
42730 {
42731 unsigned int nr;
42732 struct task_struct *reaper;
42733 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42734 + const struct cred *tmpcred = current_cred();
42735 + const struct cred *itercred;
42736 +#endif
42737 + filldir_t __filldir = filldir;
42738 struct tgid_iter iter;
42739 struct pid_namespace *ns;
42740
42741 @@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
42742 for (iter = next_tgid(ns, iter);
42743 iter.task;
42744 iter.tgid += 1, iter = next_tgid(ns, iter)) {
42745 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42746 + rcu_read_lock();
42747 + itercred = __task_cred(iter.task);
42748 +#endif
42749 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
42750 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42751 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
42752 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42753 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42754 +#endif
42755 + )
42756 +#endif
42757 + )
42758 + __filldir = &gr_fake_filldir;
42759 + else
42760 + __filldir = filldir;
42761 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42762 + rcu_read_unlock();
42763 +#endif
42764 filp->f_pos = iter.tgid + TGID_OFFSET;
42765 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
42766 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
42767 put_task_struct(iter.task);
42768 goto out;
42769 }
42770 @@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
42771 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42772 #endif
42773 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
42774 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42775 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42776 INF("syscall", S_IRUGO, proc_pid_syscall),
42777 #endif
42778 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42779 @@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
42780 #ifdef CONFIG_SECURITY
42781 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42782 #endif
42783 -#ifdef CONFIG_KALLSYMS
42784 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42785 INF("wchan", S_IRUGO, proc_pid_wchan),
42786 #endif
42787 -#ifdef CONFIG_STACKTRACE
42788 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42789 ONE("stack", S_IRUGO, proc_pid_stack),
42790 #endif
42791 #ifdef CONFIG_SCHEDSTATS
42792 diff -urNp linux-3.0.4/fs/proc/cmdline.c linux-3.0.4/fs/proc/cmdline.c
42793 --- linux-3.0.4/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
42794 +++ linux-3.0.4/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
42795 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
42796
42797 static int __init proc_cmdline_init(void)
42798 {
42799 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42800 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
42801 +#else
42802 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
42803 +#endif
42804 return 0;
42805 }
42806 module_init(proc_cmdline_init);
42807 diff -urNp linux-3.0.4/fs/proc/devices.c linux-3.0.4/fs/proc/devices.c
42808 --- linux-3.0.4/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
42809 +++ linux-3.0.4/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
42810 @@ -64,7 +64,11 @@ static const struct file_operations proc
42811
42812 static int __init proc_devices_init(void)
42813 {
42814 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42815 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
42816 +#else
42817 proc_create("devices", 0, NULL, &proc_devinfo_operations);
42818 +#endif
42819 return 0;
42820 }
42821 module_init(proc_devices_init);
42822 diff -urNp linux-3.0.4/fs/proc/inode.c linux-3.0.4/fs/proc/inode.c
42823 --- linux-3.0.4/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
42824 +++ linux-3.0.4/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
42825 @@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
42826 if (de->mode) {
42827 inode->i_mode = de->mode;
42828 inode->i_uid = de->uid;
42829 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42830 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42831 +#else
42832 inode->i_gid = de->gid;
42833 +#endif
42834 }
42835 if (de->size)
42836 inode->i_size = de->size;
42837 diff -urNp linux-3.0.4/fs/proc/internal.h linux-3.0.4/fs/proc/internal.h
42838 --- linux-3.0.4/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
42839 +++ linux-3.0.4/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
42840 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
42841 struct pid *pid, struct task_struct *task);
42842 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
42843 struct pid *pid, struct task_struct *task);
42844 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42845 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
42846 +#endif
42847 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
42848
42849 extern const struct file_operations proc_maps_operations;
42850 diff -urNp linux-3.0.4/fs/proc/Kconfig linux-3.0.4/fs/proc/Kconfig
42851 --- linux-3.0.4/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
42852 +++ linux-3.0.4/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
42853 @@ -30,12 +30,12 @@ config PROC_FS
42854
42855 config PROC_KCORE
42856 bool "/proc/kcore support" if !ARM
42857 - depends on PROC_FS && MMU
42858 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
42859
42860 config PROC_VMCORE
42861 bool "/proc/vmcore support"
42862 - depends on PROC_FS && CRASH_DUMP
42863 - default y
42864 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
42865 + default n
42866 help
42867 Exports the dump image of crashed kernel in ELF format.
42868
42869 @@ -59,8 +59,8 @@ config PROC_SYSCTL
42870 limited in memory.
42871
42872 config PROC_PAGE_MONITOR
42873 - default y
42874 - depends on PROC_FS && MMU
42875 + default n
42876 + depends on PROC_FS && MMU && !GRKERNSEC
42877 bool "Enable /proc page monitoring" if EXPERT
42878 help
42879 Various /proc files exist to monitor process memory utilization:
42880 diff -urNp linux-3.0.4/fs/proc/kcore.c linux-3.0.4/fs/proc/kcore.c
42881 --- linux-3.0.4/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
42882 +++ linux-3.0.4/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
42883 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
42884 off_t offset = 0;
42885 struct kcore_list *m;
42886
42887 + pax_track_stack();
42888 +
42889 /* setup ELF header */
42890 elf = (struct elfhdr *) bufp;
42891 bufp += sizeof(struct elfhdr);
42892 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
42893 * the addresses in the elf_phdr on our list.
42894 */
42895 start = kc_offset_to_vaddr(*fpos - elf_buflen);
42896 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
42897 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
42898 + if (tsz > buflen)
42899 tsz = buflen;
42900 -
42901 +
42902 while (buflen) {
42903 struct kcore_list *m;
42904
42905 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
42906 kfree(elf_buf);
42907 } else {
42908 if (kern_addr_valid(start)) {
42909 - unsigned long n;
42910 + char *elf_buf;
42911 + mm_segment_t oldfs;
42912
42913 - n = copy_to_user(buffer, (char *)start, tsz);
42914 - /*
42915 - * We cannot distingush between fault on source
42916 - * and fault on destination. When this happens
42917 - * we clear too and hope it will trigger the
42918 - * EFAULT again.
42919 - */
42920 - if (n) {
42921 - if (clear_user(buffer + tsz - n,
42922 - n))
42923 + elf_buf = kmalloc(tsz, GFP_KERNEL);
42924 + if (!elf_buf)
42925 + return -ENOMEM;
42926 + oldfs = get_fs();
42927 + set_fs(KERNEL_DS);
42928 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
42929 + set_fs(oldfs);
42930 + if (copy_to_user(buffer, elf_buf, tsz)) {
42931 + kfree(elf_buf);
42932 return -EFAULT;
42933 + }
42934 }
42935 + set_fs(oldfs);
42936 + kfree(elf_buf);
42937 } else {
42938 if (clear_user(buffer, tsz))
42939 return -EFAULT;
42940 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
42941
42942 static int open_kcore(struct inode *inode, struct file *filp)
42943 {
42944 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
42945 + return -EPERM;
42946 +#endif
42947 if (!capable(CAP_SYS_RAWIO))
42948 return -EPERM;
42949 if (kcore_need_update)
42950 diff -urNp linux-3.0.4/fs/proc/meminfo.c linux-3.0.4/fs/proc/meminfo.c
42951 --- linux-3.0.4/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
42952 +++ linux-3.0.4/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
42953 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
42954 unsigned long pages[NR_LRU_LISTS];
42955 int lru;
42956
42957 + pax_track_stack();
42958 +
42959 /*
42960 * display in kilobytes.
42961 */
42962 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
42963 vmi.used >> 10,
42964 vmi.largest_chunk >> 10
42965 #ifdef CONFIG_MEMORY_FAILURE
42966 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
42967 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
42968 #endif
42969 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
42970 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
42971 diff -urNp linux-3.0.4/fs/proc/nommu.c linux-3.0.4/fs/proc/nommu.c
42972 --- linux-3.0.4/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
42973 +++ linux-3.0.4/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
42974 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
42975 if (len < 1)
42976 len = 1;
42977 seq_printf(m, "%*c", len, ' ');
42978 - seq_path(m, &file->f_path, "");
42979 + seq_path(m, &file->f_path, "\n\\");
42980 }
42981
42982 seq_putc(m, '\n');
42983 diff -urNp linux-3.0.4/fs/proc/proc_net.c linux-3.0.4/fs/proc/proc_net.c
42984 --- linux-3.0.4/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
42985 +++ linux-3.0.4/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
42986 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
42987 struct task_struct *task;
42988 struct nsproxy *ns;
42989 struct net *net = NULL;
42990 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42991 + const struct cred *cred = current_cred();
42992 +#endif
42993 +
42994 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42995 + if (cred->fsuid)
42996 + return net;
42997 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42998 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
42999 + return net;
43000 +#endif
43001
43002 rcu_read_lock();
43003 task = pid_task(proc_pid(dir), PIDTYPE_PID);
43004 diff -urNp linux-3.0.4/fs/proc/proc_sysctl.c linux-3.0.4/fs/proc/proc_sysctl.c
43005 --- linux-3.0.4/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
43006 +++ linux-3.0.4/fs/proc/proc_sysctl.c 2011-08-23 21:48:14.000000000 -0400
43007 @@ -8,6 +8,8 @@
43008 #include <linux/namei.h>
43009 #include "internal.h"
43010
43011 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
43012 +
43013 static const struct dentry_operations proc_sys_dentry_operations;
43014 static const struct file_operations proc_sys_file_operations;
43015 static const struct inode_operations proc_sys_inode_operations;
43016 @@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
43017 if (!p)
43018 goto out;
43019
43020 + if (gr_handle_sysctl(p, MAY_EXEC))
43021 + goto out;
43022 +
43023 err = ERR_PTR(-ENOMEM);
43024 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
43025 if (h)
43026 @@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
43027 if (*pos < file->f_pos)
43028 continue;
43029
43030 + if (gr_handle_sysctl(table, 0))
43031 + continue;
43032 +
43033 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
43034 if (res)
43035 return res;
43036 @@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
43037 if (IS_ERR(head))
43038 return PTR_ERR(head);
43039
43040 + if (table && gr_handle_sysctl(table, MAY_EXEC))
43041 + return -ENOENT;
43042 +
43043 generic_fillattr(inode, stat);
43044 if (table)
43045 stat->mode = (stat->mode & S_IFMT) | table->mode;
43046 diff -urNp linux-3.0.4/fs/proc/root.c linux-3.0.4/fs/proc/root.c
43047 --- linux-3.0.4/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
43048 +++ linux-3.0.4/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
43049 @@ -123,7 +123,15 @@ void __init proc_root_init(void)
43050 #ifdef CONFIG_PROC_DEVICETREE
43051 proc_device_tree_init();
43052 #endif
43053 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
43054 +#ifdef CONFIG_GRKERNSEC_PROC_USER
43055 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
43056 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43057 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
43058 +#endif
43059 +#else
43060 proc_mkdir("bus", NULL);
43061 +#endif
43062 proc_sys_init();
43063 }
43064
43065 diff -urNp linux-3.0.4/fs/proc/task_mmu.c linux-3.0.4/fs/proc/task_mmu.c
43066 --- linux-3.0.4/fs/proc/task_mmu.c 2011-07-21 22:17:23.000000000 -0400
43067 +++ linux-3.0.4/fs/proc/task_mmu.c 2011-08-23 21:48:14.000000000 -0400
43068 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
43069 "VmExe:\t%8lu kB\n"
43070 "VmLib:\t%8lu kB\n"
43071 "VmPTE:\t%8lu kB\n"
43072 - "VmSwap:\t%8lu kB\n",
43073 - hiwater_vm << (PAGE_SHIFT-10),
43074 + "VmSwap:\t%8lu kB\n"
43075 +
43076 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43077 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
43078 +#endif
43079 +
43080 + ,hiwater_vm << (PAGE_SHIFT-10),
43081 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
43082 mm->locked_vm << (PAGE_SHIFT-10),
43083 hiwater_rss << (PAGE_SHIFT-10),
43084 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
43085 data << (PAGE_SHIFT-10),
43086 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
43087 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
43088 - swap << (PAGE_SHIFT-10));
43089 + swap << (PAGE_SHIFT-10)
43090 +
43091 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43092 + , mm->context.user_cs_base, mm->context.user_cs_limit
43093 +#endif
43094 +
43095 + );
43096 }
43097
43098 unsigned long task_vsize(struct mm_struct *mm)
43099 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
43100 return ret;
43101 }
43102
43103 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43104 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43105 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
43106 + _mm->pax_flags & MF_PAX_SEGMEXEC))
43107 +#endif
43108 +
43109 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
43110 {
43111 struct mm_struct *mm = vma->vm_mm;
43112 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
43113 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
43114 }
43115
43116 - /* We don't show the stack guard page in /proc/maps */
43117 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43118 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
43119 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
43120 +#else
43121 start = vma->vm_start;
43122 - if (stack_guard_page_start(vma, start))
43123 - start += PAGE_SIZE;
43124 end = vma->vm_end;
43125 - if (stack_guard_page_end(vma, end))
43126 - end -= PAGE_SIZE;
43127 +#endif
43128
43129 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
43130 start,
43131 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
43132 flags & VM_WRITE ? 'w' : '-',
43133 flags & VM_EXEC ? 'x' : '-',
43134 flags & VM_MAYSHARE ? 's' : 'p',
43135 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43136 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
43137 +#else
43138 pgoff,
43139 +#endif
43140 MAJOR(dev), MINOR(dev), ino, &len);
43141
43142 /*
43143 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
43144 */
43145 if (file) {
43146 pad_len_spaces(m, len);
43147 - seq_path(m, &file->f_path, "\n");
43148 + seq_path(m, &file->f_path, "\n\\");
43149 } else {
43150 const char *name = arch_vma_name(vma);
43151 if (!name) {
43152 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
43153 if (vma->vm_start <= mm->brk &&
43154 vma->vm_end >= mm->start_brk) {
43155 name = "[heap]";
43156 - } else if (vma->vm_start <= mm->start_stack &&
43157 - vma->vm_end >= mm->start_stack) {
43158 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
43159 + (vma->vm_start <= mm->start_stack &&
43160 + vma->vm_end >= mm->start_stack)) {
43161 name = "[stack]";
43162 }
43163 } else {
43164 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
43165 };
43166
43167 memset(&mss, 0, sizeof mss);
43168 - mss.vma = vma;
43169 - /* mmap_sem is held in m_start */
43170 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43171 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43172 -
43173 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43174 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
43175 +#endif
43176 + mss.vma = vma;
43177 + /* mmap_sem is held in m_start */
43178 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43179 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43180 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43181 + }
43182 +#endif
43183 show_map_vma(m, vma);
43184
43185 seq_printf(m,
43186 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
43187 "KernelPageSize: %8lu kB\n"
43188 "MMUPageSize: %8lu kB\n"
43189 "Locked: %8lu kB\n",
43190 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43191 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
43192 +#else
43193 (vma->vm_end - vma->vm_start) >> 10,
43194 +#endif
43195 mss.resident >> 10,
43196 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
43197 mss.shared_clean >> 10,
43198 @@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
43199
43200 if (file) {
43201 seq_printf(m, " file=");
43202 - seq_path(m, &file->f_path, "\n\t= ");
43203 + seq_path(m, &file->f_path, "\n\t\\= ");
43204 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
43205 seq_printf(m, " heap");
43206 } else if (vma->vm_start <= mm->start_stack &&
43207 diff -urNp linux-3.0.4/fs/proc/task_nommu.c linux-3.0.4/fs/proc/task_nommu.c
43208 --- linux-3.0.4/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
43209 +++ linux-3.0.4/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
43210 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
43211 else
43212 bytes += kobjsize(mm);
43213
43214 - if (current->fs && current->fs->users > 1)
43215 + if (current->fs && atomic_read(&current->fs->users) > 1)
43216 sbytes += kobjsize(current->fs);
43217 else
43218 bytes += kobjsize(current->fs);
43219 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
43220
43221 if (file) {
43222 pad_len_spaces(m, len);
43223 - seq_path(m, &file->f_path, "");
43224 + seq_path(m, &file->f_path, "\n\\");
43225 } else if (mm) {
43226 if (vma->vm_start <= mm->start_stack &&
43227 vma->vm_end >= mm->start_stack) {
43228 diff -urNp linux-3.0.4/fs/quota/netlink.c linux-3.0.4/fs/quota/netlink.c
43229 --- linux-3.0.4/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
43230 +++ linux-3.0.4/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
43231 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
43232 void quota_send_warning(short type, unsigned int id, dev_t dev,
43233 const char warntype)
43234 {
43235 - static atomic_t seq;
43236 + static atomic_unchecked_t seq;
43237 struct sk_buff *skb;
43238 void *msg_head;
43239 int ret;
43240 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
43241 "VFS: Not enough memory to send quota warning.\n");
43242 return;
43243 }
43244 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
43245 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
43246 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
43247 if (!msg_head) {
43248 printk(KERN_ERR
43249 diff -urNp linux-3.0.4/fs/readdir.c linux-3.0.4/fs/readdir.c
43250 --- linux-3.0.4/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
43251 +++ linux-3.0.4/fs/readdir.c 2011-08-23 21:48:14.000000000 -0400
43252 @@ -17,6 +17,7 @@
43253 #include <linux/security.h>
43254 #include <linux/syscalls.h>
43255 #include <linux/unistd.h>
43256 +#include <linux/namei.h>
43257
43258 #include <asm/uaccess.h>
43259
43260 @@ -67,6 +68,7 @@ struct old_linux_dirent {
43261
43262 struct readdir_callback {
43263 struct old_linux_dirent __user * dirent;
43264 + struct file * file;
43265 int result;
43266 };
43267
43268 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
43269 buf->result = -EOVERFLOW;
43270 return -EOVERFLOW;
43271 }
43272 +
43273 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43274 + return 0;
43275 +
43276 buf->result++;
43277 dirent = buf->dirent;
43278 if (!access_ok(VERIFY_WRITE, dirent,
43279 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
43280
43281 buf.result = 0;
43282 buf.dirent = dirent;
43283 + buf.file = file;
43284
43285 error = vfs_readdir(file, fillonedir, &buf);
43286 if (buf.result)
43287 @@ -142,6 +149,7 @@ struct linux_dirent {
43288 struct getdents_callback {
43289 struct linux_dirent __user * current_dir;
43290 struct linux_dirent __user * previous;
43291 + struct file * file;
43292 int count;
43293 int error;
43294 };
43295 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
43296 buf->error = -EOVERFLOW;
43297 return -EOVERFLOW;
43298 }
43299 +
43300 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43301 + return 0;
43302 +
43303 dirent = buf->previous;
43304 if (dirent) {
43305 if (__put_user(offset, &dirent->d_off))
43306 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
43307 buf.previous = NULL;
43308 buf.count = count;
43309 buf.error = 0;
43310 + buf.file = file;
43311
43312 error = vfs_readdir(file, filldir, &buf);
43313 if (error >= 0)
43314 @@ -229,6 +242,7 @@ out:
43315 struct getdents_callback64 {
43316 struct linux_dirent64 __user * current_dir;
43317 struct linux_dirent64 __user * previous;
43318 + struct file *file;
43319 int count;
43320 int error;
43321 };
43322 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
43323 buf->error = -EINVAL; /* only used if we fail.. */
43324 if (reclen > buf->count)
43325 return -EINVAL;
43326 +
43327 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43328 + return 0;
43329 +
43330 dirent = buf->previous;
43331 if (dirent) {
43332 if (__put_user(offset, &dirent->d_off))
43333 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
43334
43335 buf.current_dir = dirent;
43336 buf.previous = NULL;
43337 + buf.file = file;
43338 buf.count = count;
43339 buf.error = 0;
43340
43341 diff -urNp linux-3.0.4/fs/reiserfs/dir.c linux-3.0.4/fs/reiserfs/dir.c
43342 --- linux-3.0.4/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
43343 +++ linux-3.0.4/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
43344 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
43345 struct reiserfs_dir_entry de;
43346 int ret = 0;
43347
43348 + pax_track_stack();
43349 +
43350 reiserfs_write_lock(inode->i_sb);
43351
43352 reiserfs_check_lock_depth(inode->i_sb, "readdir");
43353 diff -urNp linux-3.0.4/fs/reiserfs/do_balan.c linux-3.0.4/fs/reiserfs/do_balan.c
43354 --- linux-3.0.4/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
43355 +++ linux-3.0.4/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
43356 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
43357 return;
43358 }
43359
43360 - atomic_inc(&(fs_generation(tb->tb_sb)));
43361 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
43362 do_balance_starts(tb);
43363
43364 /* balance leaf returns 0 except if combining L R and S into
43365 diff -urNp linux-3.0.4/fs/reiserfs/journal.c linux-3.0.4/fs/reiserfs/journal.c
43366 --- linux-3.0.4/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
43367 +++ linux-3.0.4/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
43368 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
43369 struct buffer_head *bh;
43370 int i, j;
43371
43372 + pax_track_stack();
43373 +
43374 bh = __getblk(dev, block, bufsize);
43375 if (buffer_uptodate(bh))
43376 return (bh);
43377 diff -urNp linux-3.0.4/fs/reiserfs/namei.c linux-3.0.4/fs/reiserfs/namei.c
43378 --- linux-3.0.4/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
43379 +++ linux-3.0.4/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
43380 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
43381 unsigned long savelink = 1;
43382 struct timespec ctime;
43383
43384 + pax_track_stack();
43385 +
43386 /* three balancings: (1) old name removal, (2) new name insertion
43387 and (3) maybe "save" link insertion
43388 stat data updates: (1) old directory,
43389 diff -urNp linux-3.0.4/fs/reiserfs/procfs.c linux-3.0.4/fs/reiserfs/procfs.c
43390 --- linux-3.0.4/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
43391 +++ linux-3.0.4/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
43392 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
43393 "SMALL_TAILS " : "NO_TAILS ",
43394 replay_only(sb) ? "REPLAY_ONLY " : "",
43395 convert_reiserfs(sb) ? "CONV " : "",
43396 - atomic_read(&r->s_generation_counter),
43397 + atomic_read_unchecked(&r->s_generation_counter),
43398 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
43399 SF(s_do_balance), SF(s_unneeded_left_neighbor),
43400 SF(s_good_search_by_key_reada), SF(s_bmaps),
43401 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file
43402 struct journal_params *jp = &rs->s_v1.s_journal;
43403 char b[BDEVNAME_SIZE];
43404
43405 + pax_track_stack();
43406 +
43407 seq_printf(m, /* on-disk fields */
43408 "jp_journal_1st_block: \t%i\n"
43409 "jp_journal_dev: \t%s[%x]\n"
43410 diff -urNp linux-3.0.4/fs/reiserfs/stree.c linux-3.0.4/fs/reiserfs/stree.c
43411 --- linux-3.0.4/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
43412 +++ linux-3.0.4/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
43413 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
43414 int iter = 0;
43415 #endif
43416
43417 + pax_track_stack();
43418 +
43419 BUG_ON(!th->t_trans_id);
43420
43421 init_tb_struct(th, &s_del_balance, sb, path,
43422 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
43423 int retval;
43424 int quota_cut_bytes = 0;
43425
43426 + pax_track_stack();
43427 +
43428 BUG_ON(!th->t_trans_id);
43429
43430 le_key2cpu_key(&cpu_key, key);
43431 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
43432 int quota_cut_bytes;
43433 loff_t tail_pos = 0;
43434
43435 + pax_track_stack();
43436 +
43437 BUG_ON(!th->t_trans_id);
43438
43439 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
43440 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
43441 int retval;
43442 int fs_gen;
43443
43444 + pax_track_stack();
43445 +
43446 BUG_ON(!th->t_trans_id);
43447
43448 fs_gen = get_generation(inode->i_sb);
43449 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
43450 int fs_gen = 0;
43451 int quota_bytes = 0;
43452
43453 + pax_track_stack();
43454 +
43455 BUG_ON(!th->t_trans_id);
43456
43457 if (inode) { /* Do we count quotas for item? */
43458 diff -urNp linux-3.0.4/fs/reiserfs/super.c linux-3.0.4/fs/reiserfs/super.c
43459 --- linux-3.0.4/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
43460 +++ linux-3.0.4/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
43461 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
43462 {.option_name = NULL}
43463 };
43464
43465 + pax_track_stack();
43466 +
43467 *blocks = 0;
43468 if (!options || !*options)
43469 /* use default configuration: create tails, journaling on, no
43470 diff -urNp linux-3.0.4/fs/select.c linux-3.0.4/fs/select.c
43471 --- linux-3.0.4/fs/select.c 2011-07-21 22:17:23.000000000 -0400
43472 +++ linux-3.0.4/fs/select.c 2011-08-23 21:48:14.000000000 -0400
43473 @@ -20,6 +20,7 @@
43474 #include <linux/module.h>
43475 #include <linux/slab.h>
43476 #include <linux/poll.h>
43477 +#include <linux/security.h>
43478 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
43479 #include <linux/file.h>
43480 #include <linux/fdtable.h>
43481 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
43482 int retval, i, timed_out = 0;
43483 unsigned long slack = 0;
43484
43485 + pax_track_stack();
43486 +
43487 rcu_read_lock();
43488 retval = max_select_fd(n, fds);
43489 rcu_read_unlock();
43490 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
43491 /* Allocate small arguments on the stack to save memory and be faster */
43492 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43493
43494 + pax_track_stack();
43495 +
43496 ret = -EINVAL;
43497 if (n < 0)
43498 goto out_nofds;
43499 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
43500 struct poll_list *walk = head;
43501 unsigned long todo = nfds;
43502
43503 + pax_track_stack();
43504 +
43505 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
43506 if (nfds > rlimit(RLIMIT_NOFILE))
43507 return -EINVAL;
43508
43509 diff -urNp linux-3.0.4/fs/seq_file.c linux-3.0.4/fs/seq_file.c
43510 --- linux-3.0.4/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
43511 +++ linux-3.0.4/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
43512 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
43513 return 0;
43514 }
43515 if (!m->buf) {
43516 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43517 + m->size = PAGE_SIZE;
43518 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43519 if (!m->buf)
43520 return -ENOMEM;
43521 }
43522 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
43523 Eoverflow:
43524 m->op->stop(m, p);
43525 kfree(m->buf);
43526 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43527 + m->size <<= 1;
43528 + m->buf = kmalloc(m->size, GFP_KERNEL);
43529 return !m->buf ? -ENOMEM : -EAGAIN;
43530 }
43531
43532 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
43533 m->version = file->f_version;
43534 /* grab buffer if we didn't have one */
43535 if (!m->buf) {
43536 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43537 + m->size = PAGE_SIZE;
43538 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43539 if (!m->buf)
43540 goto Enomem;
43541 }
43542 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
43543 goto Fill;
43544 m->op->stop(m, p);
43545 kfree(m->buf);
43546 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43547 + m->size <<= 1;
43548 + m->buf = kmalloc(m->size, GFP_KERNEL);
43549 if (!m->buf)
43550 goto Enomem;
43551 m->count = 0;
43552 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file
43553 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
43554 void *data)
43555 {
43556 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
43557 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
43558 int res = -ENOMEM;
43559
43560 if (op) {
43561 diff -urNp linux-3.0.4/fs/splice.c linux-3.0.4/fs/splice.c
43562 --- linux-3.0.4/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
43563 +++ linux-3.0.4/fs/splice.c 2011-08-23 21:48:14.000000000 -0400
43564 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
43565 pipe_lock(pipe);
43566
43567 for (;;) {
43568 - if (!pipe->readers) {
43569 + if (!atomic_read(&pipe->readers)) {
43570 send_sig(SIGPIPE, current, 0);
43571 if (!ret)
43572 ret = -EPIPE;
43573 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
43574 do_wakeup = 0;
43575 }
43576
43577 - pipe->waiting_writers++;
43578 + atomic_inc(&pipe->waiting_writers);
43579 pipe_wait(pipe);
43580 - pipe->waiting_writers--;
43581 + atomic_dec(&pipe->waiting_writers);
43582 }
43583
43584 pipe_unlock(pipe);
43585 @@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
43586 .spd_release = spd_release_page,
43587 };
43588
43589 + pax_track_stack();
43590 +
43591 if (splice_grow_spd(pipe, &spd))
43592 return -ENOMEM;
43593
43594 @@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
43595 old_fs = get_fs();
43596 set_fs(get_ds());
43597 /* The cast to a user pointer is valid due to the set_fs() */
43598 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43599 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43600 set_fs(old_fs);
43601
43602 return res;
43603 @@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
43604 old_fs = get_fs();
43605 set_fs(get_ds());
43606 /* The cast to a user pointer is valid due to the set_fs() */
43607 - res = vfs_write(file, (const char __user *)buf, count, &pos);
43608 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43609 set_fs(old_fs);
43610
43611 return res;
43612 @@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
43613 .spd_release = spd_release_page,
43614 };
43615
43616 + pax_track_stack();
43617 +
43618 if (splice_grow_spd(pipe, &spd))
43619 return -ENOMEM;
43620
43621 @@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
43622 goto err;
43623
43624 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43625 - vec[i].iov_base = (void __user *) page_address(page);
43626 + vec[i].iov_base = (__force void __user *) page_address(page);
43627 vec[i].iov_len = this_len;
43628 spd.pages[i] = page;
43629 spd.nr_pages++;
43630 @@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43631 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43632 {
43633 while (!pipe->nrbufs) {
43634 - if (!pipe->writers)
43635 + if (!atomic_read(&pipe->writers))
43636 return 0;
43637
43638 - if (!pipe->waiting_writers && sd->num_spliced)
43639 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43640 return 0;
43641
43642 if (sd->flags & SPLICE_F_NONBLOCK)
43643 @@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
43644 * out of the pipe right after the splice_to_pipe(). So set
43645 * PIPE_READERS appropriately.
43646 */
43647 - pipe->readers = 1;
43648 + atomic_set(&pipe->readers, 1);
43649
43650 current->splice_pipe = pipe;
43651 }
43652 @@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
43653 };
43654 long ret;
43655
43656 + pax_track_stack();
43657 +
43658 pipe = get_pipe_info(file);
43659 if (!pipe)
43660 return -EBADF;
43661 @@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
43662 ret = -ERESTARTSYS;
43663 break;
43664 }
43665 - if (!pipe->writers)
43666 + if (!atomic_read(&pipe->writers))
43667 break;
43668 - if (!pipe->waiting_writers) {
43669 + if (!atomic_read(&pipe->waiting_writers)) {
43670 if (flags & SPLICE_F_NONBLOCK) {
43671 ret = -EAGAIN;
43672 break;
43673 @@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
43674 pipe_lock(pipe);
43675
43676 while (pipe->nrbufs >= pipe->buffers) {
43677 - if (!pipe->readers) {
43678 + if (!atomic_read(&pipe->readers)) {
43679 send_sig(SIGPIPE, current, 0);
43680 ret = -EPIPE;
43681 break;
43682 @@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
43683 ret = -ERESTARTSYS;
43684 break;
43685 }
43686 - pipe->waiting_writers++;
43687 + atomic_inc(&pipe->waiting_writers);
43688 pipe_wait(pipe);
43689 - pipe->waiting_writers--;
43690 + atomic_dec(&pipe->waiting_writers);
43691 }
43692
43693 pipe_unlock(pipe);
43694 @@ -1819,14 +1825,14 @@ retry:
43695 pipe_double_lock(ipipe, opipe);
43696
43697 do {
43698 - if (!opipe->readers) {
43699 + if (!atomic_read(&opipe->readers)) {
43700 send_sig(SIGPIPE, current, 0);
43701 if (!ret)
43702 ret = -EPIPE;
43703 break;
43704 }
43705
43706 - if (!ipipe->nrbufs && !ipipe->writers)
43707 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
43708 break;
43709
43710 /*
43711 @@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
43712 pipe_double_lock(ipipe, opipe);
43713
43714 do {
43715 - if (!opipe->readers) {
43716 + if (!atomic_read(&opipe->readers)) {
43717 send_sig(SIGPIPE, current, 0);
43718 if (!ret)
43719 ret = -EPIPE;
43720 @@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
43721 * return EAGAIN if we have the potential of some data in the
43722 * future, otherwise just return 0
43723 */
43724 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
43725 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
43726 ret = -EAGAIN;
43727
43728 pipe_unlock(ipipe);
43729 diff -urNp linux-3.0.4/fs/sysfs/file.c linux-3.0.4/fs/sysfs/file.c
43730 --- linux-3.0.4/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
43731 +++ linux-3.0.4/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
43732 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
43733
43734 struct sysfs_open_dirent {
43735 atomic_t refcnt;
43736 - atomic_t event;
43737 + atomic_unchecked_t event;
43738 wait_queue_head_t poll;
43739 struct list_head buffers; /* goes through sysfs_buffer.list */
43740 };
43741 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
43742 if (!sysfs_get_active(attr_sd))
43743 return -ENODEV;
43744
43745 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
43746 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
43747 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
43748
43749 sysfs_put_active(attr_sd);
43750 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
43751 return -ENOMEM;
43752
43753 atomic_set(&new_od->refcnt, 0);
43754 - atomic_set(&new_od->event, 1);
43755 + atomic_set_unchecked(&new_od->event, 1);
43756 init_waitqueue_head(&new_od->poll);
43757 INIT_LIST_HEAD(&new_od->buffers);
43758 goto retry;
43759 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
43760
43761 sysfs_put_active(attr_sd);
43762
43763 - if (buffer->event != atomic_read(&od->event))
43764 + if (buffer->event != atomic_read_unchecked(&od->event))
43765 goto trigger;
43766
43767 return DEFAULT_POLLMASK;
43768 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
43769
43770 od = sd->s_attr.open;
43771 if (od) {
43772 - atomic_inc(&od->event);
43773 + atomic_inc_unchecked(&od->event);
43774 wake_up_interruptible(&od->poll);
43775 }
43776
43777 diff -urNp linux-3.0.4/fs/sysfs/mount.c linux-3.0.4/fs/sysfs/mount.c
43778 --- linux-3.0.4/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
43779 +++ linux-3.0.4/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
43780 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
43781 .s_name = "",
43782 .s_count = ATOMIC_INIT(1),
43783 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
43784 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43785 + .s_mode = S_IFDIR | S_IRWXU,
43786 +#else
43787 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43788 +#endif
43789 .s_ino = 1,
43790 };
43791
43792 diff -urNp linux-3.0.4/fs/sysfs/symlink.c linux-3.0.4/fs/sysfs/symlink.c
43793 --- linux-3.0.4/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
43794 +++ linux-3.0.4/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
43795 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
43796
43797 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43798 {
43799 - char *page = nd_get_link(nd);
43800 + const char *page = nd_get_link(nd);
43801 if (!IS_ERR(page))
43802 free_page((unsigned long)page);
43803 }
43804 diff -urNp linux-3.0.4/fs/udf/inode.c linux-3.0.4/fs/udf/inode.c
43805 --- linux-3.0.4/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
43806 +++ linux-3.0.4/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
43807 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
43808 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
43809 int lastblock = 0;
43810
43811 + pax_track_stack();
43812 +
43813 prev_epos.offset = udf_file_entry_alloc_offset(inode);
43814 prev_epos.block = iinfo->i_location;
43815 prev_epos.bh = NULL;
43816 diff -urNp linux-3.0.4/fs/udf/misc.c linux-3.0.4/fs/udf/misc.c
43817 --- linux-3.0.4/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
43818 +++ linux-3.0.4/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
43819 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
43820
43821 u8 udf_tag_checksum(const struct tag *t)
43822 {
43823 - u8 *data = (u8 *)t;
43824 + const u8 *data = (const u8 *)t;
43825 u8 checksum = 0;
43826 int i;
43827 for (i = 0; i < sizeof(struct tag); ++i)
43828 diff -urNp linux-3.0.4/fs/utimes.c linux-3.0.4/fs/utimes.c
43829 --- linux-3.0.4/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
43830 +++ linux-3.0.4/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
43831 @@ -1,6 +1,7 @@
43832 #include <linux/compiler.h>
43833 #include <linux/file.h>
43834 #include <linux/fs.h>
43835 +#include <linux/security.h>
43836 #include <linux/linkage.h>
43837 #include <linux/mount.h>
43838 #include <linux/namei.h>
43839 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
43840 goto mnt_drop_write_and_out;
43841 }
43842 }
43843 +
43844 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
43845 + error = -EACCES;
43846 + goto mnt_drop_write_and_out;
43847 + }
43848 +
43849 mutex_lock(&inode->i_mutex);
43850 error = notify_change(path->dentry, &newattrs);
43851 mutex_unlock(&inode->i_mutex);
43852 diff -urNp linux-3.0.4/fs/xattr_acl.c linux-3.0.4/fs/xattr_acl.c
43853 --- linux-3.0.4/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
43854 +++ linux-3.0.4/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
43855 @@ -17,8 +17,8 @@
43856 struct posix_acl *
43857 posix_acl_from_xattr(const void *value, size_t size)
43858 {
43859 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
43860 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
43861 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
43862 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
43863 int count;
43864 struct posix_acl *acl;
43865 struct posix_acl_entry *acl_e;
43866 diff -urNp linux-3.0.4/fs/xattr.c linux-3.0.4/fs/xattr.c
43867 --- linux-3.0.4/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
43868 +++ linux-3.0.4/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
43869 @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
43870 * Extended attribute SET operations
43871 */
43872 static long
43873 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
43874 +setxattr(struct path *path, const char __user *name, const void __user *value,
43875 size_t size, int flags)
43876 {
43877 int error;
43878 @@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
43879 return PTR_ERR(kvalue);
43880 }
43881
43882 - error = vfs_setxattr(d, kname, kvalue, size, flags);
43883 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
43884 + error = -EACCES;
43885 + goto out;
43886 + }
43887 +
43888 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
43889 +out:
43890 kfree(kvalue);
43891 return error;
43892 }
43893 @@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
43894 return error;
43895 error = mnt_want_write(path.mnt);
43896 if (!error) {
43897 - error = setxattr(path.dentry, name, value, size, flags);
43898 + error = setxattr(&path, name, value, size, flags);
43899 mnt_drop_write(path.mnt);
43900 }
43901 path_put(&path);
43902 @@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
43903 return error;
43904 error = mnt_want_write(path.mnt);
43905 if (!error) {
43906 - error = setxattr(path.dentry, name, value, size, flags);
43907 + error = setxattr(&path, name, value, size, flags);
43908 mnt_drop_write(path.mnt);
43909 }
43910 path_put(&path);
43911 @@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
43912 const void __user *,value, size_t, size, int, flags)
43913 {
43914 struct file *f;
43915 - struct dentry *dentry;
43916 int error = -EBADF;
43917
43918 f = fget(fd);
43919 if (!f)
43920 return error;
43921 - dentry = f->f_path.dentry;
43922 - audit_inode(NULL, dentry);
43923 + audit_inode(NULL, f->f_path.dentry);
43924 error = mnt_want_write_file(f);
43925 if (!error) {
43926 - error = setxattr(dentry, name, value, size, flags);
43927 + error = setxattr(&f->f_path, name, value, size, flags);
43928 mnt_drop_write(f->f_path.mnt);
43929 }
43930 fput(f);
43931 diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c
43932 --- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
43933 +++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
43934 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
43935 xfs_fsop_geom_t fsgeo;
43936 int error;
43937
43938 + memset(&fsgeo, 0, sizeof(fsgeo));
43939 error = xfs_fs_geometry(mp, &fsgeo, 3);
43940 if (error)
43941 return -error;
43942 diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c
43943 --- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
43944 +++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
43945 @@ -128,7 +128,7 @@ xfs_find_handle(
43946 }
43947
43948 error = -EFAULT;
43949 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
43950 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
43951 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
43952 goto out_put;
43953
43954 diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c
43955 --- linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
43956 +++ linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
43957 @@ -437,7 +437,7 @@ xfs_vn_put_link(
43958 struct nameidata *nd,
43959 void *p)
43960 {
43961 - char *s = nd_get_link(nd);
43962 + const char *s = nd_get_link(nd);
43963
43964 if (!IS_ERR(s))
43965 kfree(s);
43966 diff -urNp linux-3.0.4/fs/xfs/xfs_bmap.c linux-3.0.4/fs/xfs/xfs_bmap.c
43967 --- linux-3.0.4/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
43968 +++ linux-3.0.4/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
43969 @@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
43970 int nmap,
43971 int ret_nmap);
43972 #else
43973 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
43974 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
43975 #endif /* DEBUG */
43976
43977 STATIC int
43978 diff -urNp linux-3.0.4/fs/xfs/xfs_dir2_sf.c linux-3.0.4/fs/xfs/xfs_dir2_sf.c
43979 --- linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
43980 +++ linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
43981 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
43982 }
43983
43984 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
43985 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
43986 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
43987 + char name[sfep->namelen];
43988 + memcpy(name, sfep->name, sfep->namelen);
43989 + if (filldir(dirent, name, sfep->namelen,
43990 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
43991 + *offset = off & 0x7fffffff;
43992 + return 0;
43993 + }
43994 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
43995 off & 0x7fffffff, ino, DT_UNKNOWN)) {
43996 *offset = off & 0x7fffffff;
43997 return 0;
43998 diff -urNp linux-3.0.4/grsecurity/gracl_alloc.c linux-3.0.4/grsecurity/gracl_alloc.c
43999 --- linux-3.0.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
44000 +++ linux-3.0.4/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
44001 @@ -0,0 +1,105 @@
44002 +#include <linux/kernel.h>
44003 +#include <linux/mm.h>
44004 +#include <linux/slab.h>
44005 +#include <linux/vmalloc.h>
44006 +#include <linux/gracl.h>
44007 +#include <linux/grsecurity.h>
44008 +
44009 +static unsigned long alloc_stack_next = 1;
44010 +static unsigned long alloc_stack_size = 1;
44011 +static void **alloc_stack;
44012 +
44013 +static __inline__ int
44014 +alloc_pop(void)
44015 +{
44016 + if (alloc_stack_next == 1)
44017 + return 0;
44018 +
44019 + kfree(alloc_stack[alloc_stack_next - 2]);
44020 +
44021 + alloc_stack_next--;
44022 +
44023 + return 1;
44024 +}
44025 +
44026 +static __inline__ int
44027 +alloc_push(void *buf)
44028 +{
44029 + if (alloc_stack_next >= alloc_stack_size)
44030 + return 1;
44031 +
44032 + alloc_stack[alloc_stack_next - 1] = buf;
44033 +
44034 + alloc_stack_next++;
44035 +
44036 + return 0;
44037 +}
44038 +
44039 +void *
44040 +acl_alloc(unsigned long len)
44041 +{
44042 + void *ret = NULL;
44043 +
44044 + if (!len || len > PAGE_SIZE)
44045 + goto out;
44046 +
44047 + ret = kmalloc(len, GFP_KERNEL);
44048 +
44049 + if (ret) {
44050 + if (alloc_push(ret)) {
44051 + kfree(ret);
44052 + ret = NULL;
44053 + }
44054 + }
44055 +
44056 +out:
44057 + return ret;
44058 +}
44059 +
44060 +void *
44061 +acl_alloc_num(unsigned long num, unsigned long len)
44062 +{
44063 + if (!len || (num > (PAGE_SIZE / len)))
44064 + return NULL;
44065 +
44066 + return acl_alloc(num * len);
44067 +}
44068 +
44069 +void
44070 +acl_free_all(void)
44071 +{
44072 + if (gr_acl_is_enabled() || !alloc_stack)
44073 + return;
44074 +
44075 + while (alloc_pop()) ;
44076 +
44077 + if (alloc_stack) {
44078 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
44079 + kfree(alloc_stack);
44080 + else
44081 + vfree(alloc_stack);
44082 + }
44083 +
44084 + alloc_stack = NULL;
44085 + alloc_stack_size = 1;
44086 + alloc_stack_next = 1;
44087 +
44088 + return;
44089 +}
44090 +
44091 +int
44092 +acl_alloc_stack_init(unsigned long size)
44093 +{
44094 + if ((size * sizeof (void *)) <= PAGE_SIZE)
44095 + alloc_stack =
44096 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
44097 + else
44098 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
44099 +
44100 + alloc_stack_size = size;
44101 +
44102 + if (!alloc_stack)
44103 + return 0;
44104 + else
44105 + return 1;
44106 +}
44107 diff -urNp linux-3.0.4/grsecurity/gracl.c linux-3.0.4/grsecurity/gracl.c
44108 --- linux-3.0.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
44109 +++ linux-3.0.4/grsecurity/gracl.c 2011-08-23 21:48:14.000000000 -0400
44110 @@ -0,0 +1,4106 @@
44111 +#include <linux/kernel.h>
44112 +#include <linux/module.h>
44113 +#include <linux/sched.h>
44114 +#include <linux/mm.h>
44115 +#include <linux/file.h>
44116 +#include <linux/fs.h>
44117 +#include <linux/namei.h>
44118 +#include <linux/mount.h>
44119 +#include <linux/tty.h>
44120 +#include <linux/proc_fs.h>
44121 +#include <linux/lglock.h>
44122 +#include <linux/slab.h>
44123 +#include <linux/vmalloc.h>
44124 +#include <linux/types.h>
44125 +#include <linux/sysctl.h>
44126 +#include <linux/netdevice.h>
44127 +#include <linux/ptrace.h>
44128 +#include <linux/gracl.h>
44129 +#include <linux/gralloc.h>
44130 +#include <linux/grsecurity.h>
44131 +#include <linux/grinternal.h>
44132 +#include <linux/pid_namespace.h>
44133 +#include <linux/fdtable.h>
44134 +#include <linux/percpu.h>
44135 +
44136 +#include <asm/uaccess.h>
44137 +#include <asm/errno.h>
44138 +#include <asm/mman.h>
44139 +
44140 +static struct acl_role_db acl_role_set;
44141 +static struct name_db name_set;
44142 +static struct inodev_db inodev_set;
44143 +
44144 +/* for keeping track of userspace pointers used for subjects, so we
44145 + can share references in the kernel as well
44146 +*/
44147 +
44148 +static struct path real_root;
44149 +
44150 +static struct acl_subj_map_db subj_map_set;
44151 +
44152 +static struct acl_role_label *default_role;
44153 +
44154 +static struct acl_role_label *role_list;
44155 +
44156 +static u16 acl_sp_role_value;
44157 +
44158 +extern char *gr_shared_page[4];
44159 +static DEFINE_MUTEX(gr_dev_mutex);
44160 +DEFINE_RWLOCK(gr_inode_lock);
44161 +
44162 +struct gr_arg *gr_usermode;
44163 +
44164 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
44165 +
44166 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
44167 +extern void gr_clear_learn_entries(void);
44168 +
44169 +#ifdef CONFIG_GRKERNSEC_RESLOG
44170 +extern void gr_log_resource(const struct task_struct *task,
44171 + const int res, const unsigned long wanted, const int gt);
44172 +#endif
44173 +
44174 +unsigned char *gr_system_salt;
44175 +unsigned char *gr_system_sum;
44176 +
44177 +static struct sprole_pw **acl_special_roles = NULL;
44178 +static __u16 num_sprole_pws = 0;
44179 +
44180 +static struct acl_role_label *kernel_role = NULL;
44181 +
44182 +static unsigned int gr_auth_attempts = 0;
44183 +static unsigned long gr_auth_expires = 0UL;
44184 +
44185 +#ifdef CONFIG_NET
44186 +extern struct vfsmount *sock_mnt;
44187 +#endif
44188 +
44189 +extern struct vfsmount *pipe_mnt;
44190 +extern struct vfsmount *shm_mnt;
44191 +#ifdef CONFIG_HUGETLBFS
44192 +extern struct vfsmount *hugetlbfs_vfsmount;
44193 +#endif
44194 +
44195 +static struct acl_object_label *fakefs_obj_rw;
44196 +static struct acl_object_label *fakefs_obj_rwx;
44197 +
44198 +extern int gr_init_uidset(void);
44199 +extern void gr_free_uidset(void);
44200 +extern void gr_remove_uid(uid_t uid);
44201 +extern int gr_find_uid(uid_t uid);
44202 +
44203 +DECLARE_BRLOCK(vfsmount_lock);
44204 +
44205 +__inline__ int
44206 +gr_acl_is_enabled(void)
44207 +{
44208 + return (gr_status & GR_READY);
44209 +}
44210 +
44211 +#ifdef CONFIG_BTRFS_FS
44212 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
44213 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
44214 +#endif
44215 +
44216 +static inline dev_t __get_dev(const struct dentry *dentry)
44217 +{
44218 +#ifdef CONFIG_BTRFS_FS
44219 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
44220 + return get_btrfs_dev_from_inode(dentry->d_inode);
44221 + else
44222 +#endif
44223 + return dentry->d_inode->i_sb->s_dev;
44224 +}
44225 +
44226 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
44227 +{
44228 + return __get_dev(dentry);
44229 +}
44230 +
44231 +static char gr_task_roletype_to_char(struct task_struct *task)
44232 +{
44233 + switch (task->role->roletype &
44234 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
44235 + GR_ROLE_SPECIAL)) {
44236 + case GR_ROLE_DEFAULT:
44237 + return 'D';
44238 + case GR_ROLE_USER:
44239 + return 'U';
44240 + case GR_ROLE_GROUP:
44241 + return 'G';
44242 + case GR_ROLE_SPECIAL:
44243 + return 'S';
44244 + }
44245 +
44246 + return 'X';
44247 +}
44248 +
44249 +char gr_roletype_to_char(void)
44250 +{
44251 + return gr_task_roletype_to_char(current);
44252 +}
44253 +
44254 +__inline__ int
44255 +gr_acl_tpe_check(void)
44256 +{
44257 + if (unlikely(!(gr_status & GR_READY)))
44258 + return 0;
44259 + if (current->role->roletype & GR_ROLE_TPE)
44260 + return 1;
44261 + else
44262 + return 0;
44263 +}
44264 +
44265 +int
44266 +gr_handle_rawio(const struct inode *inode)
44267 +{
44268 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
44269 + if (inode && S_ISBLK(inode->i_mode) &&
44270 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
44271 + !capable(CAP_SYS_RAWIO))
44272 + return 1;
44273 +#endif
44274 + return 0;
44275 +}
44276 +
44277 +static int
44278 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
44279 +{
44280 + if (likely(lena != lenb))
44281 + return 0;
44282 +
44283 + return !memcmp(a, b, lena);
44284 +}
44285 +
44286 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
44287 +{
44288 + *buflen -= namelen;
44289 + if (*buflen < 0)
44290 + return -ENAMETOOLONG;
44291 + *buffer -= namelen;
44292 + memcpy(*buffer, str, namelen);
44293 + return 0;
44294 +}
44295 +
44296 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
44297 +{
44298 + return prepend(buffer, buflen, name->name, name->len);
44299 +}
44300 +
44301 +static int prepend_path(const struct path *path, struct path *root,
44302 + char **buffer, int *buflen)
44303 +{
44304 + struct dentry *dentry = path->dentry;
44305 + struct vfsmount *vfsmnt = path->mnt;
44306 + bool slash = false;
44307 + int error = 0;
44308 +
44309 + while (dentry != root->dentry || vfsmnt != root->mnt) {
44310 + struct dentry * parent;
44311 +
44312 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
44313 + /* Global root? */
44314 + if (vfsmnt->mnt_parent == vfsmnt) {
44315 + goto out;
44316 + }
44317 + dentry = vfsmnt->mnt_mountpoint;
44318 + vfsmnt = vfsmnt->mnt_parent;
44319 + continue;
44320 + }
44321 + parent = dentry->d_parent;
44322 + prefetch(parent);
44323 + spin_lock(&dentry->d_lock);
44324 + error = prepend_name(buffer, buflen, &dentry->d_name);
44325 + spin_unlock(&dentry->d_lock);
44326 + if (!error)
44327 + error = prepend(buffer, buflen, "/", 1);
44328 + if (error)
44329 + break;
44330 +
44331 + slash = true;
44332 + dentry = parent;
44333 + }
44334 +
44335 +out:
44336 + if (!error && !slash)
44337 + error = prepend(buffer, buflen, "/", 1);
44338 +
44339 + return error;
44340 +}
44341 +
44342 +/* this must be called with vfsmount_lock and rename_lock held */
44343 +
44344 +static char *__our_d_path(const struct path *path, struct path *root,
44345 + char *buf, int buflen)
44346 +{
44347 + char *res = buf + buflen;
44348 + int error;
44349 +
44350 + prepend(&res, &buflen, "\0", 1);
44351 + error = prepend_path(path, root, &res, &buflen);
44352 + if (error)
44353 + return ERR_PTR(error);
44354 +
44355 + return res;
44356 +}
44357 +
44358 +static char *
44359 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
44360 +{
44361 + char *retval;
44362 +
44363 + retval = __our_d_path(path, root, buf, buflen);
44364 + if (unlikely(IS_ERR(retval)))
44365 + retval = strcpy(buf, "<path too long>");
44366 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
44367 + retval[1] = '\0';
44368 +
44369 + return retval;
44370 +}
44371 +
44372 +static char *
44373 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44374 + char *buf, int buflen)
44375 +{
44376 + struct path path;
44377 + char *res;
44378 +
44379 + path.dentry = (struct dentry *)dentry;
44380 + path.mnt = (struct vfsmount *)vfsmnt;
44381 +
44382 + /* we can use real_root.dentry, real_root.mnt, because this is only called
44383 + by the RBAC system */
44384 + res = gen_full_path(&path, &real_root, buf, buflen);
44385 +
44386 + return res;
44387 +}
44388 +
44389 +static char *
44390 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44391 + char *buf, int buflen)
44392 +{
44393 + char *res;
44394 + struct path path;
44395 + struct path root;
44396 + struct task_struct *reaper = &init_task;
44397 +
44398 + path.dentry = (struct dentry *)dentry;
44399 + path.mnt = (struct vfsmount *)vfsmnt;
44400 +
44401 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
44402 + get_fs_root(reaper->fs, &root);
44403 +
44404 + write_seqlock(&rename_lock);
44405 + br_read_lock(vfsmount_lock);
44406 + res = gen_full_path(&path, &root, buf, buflen);
44407 + br_read_unlock(vfsmount_lock);
44408 + write_sequnlock(&rename_lock);
44409 +
44410 + path_put(&root);
44411 + return res;
44412 +}
44413 +
44414 +static char *
44415 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44416 +{
44417 + char *ret;
44418 + write_seqlock(&rename_lock);
44419 + br_read_lock(vfsmount_lock);
44420 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44421 + PAGE_SIZE);
44422 + br_read_unlock(vfsmount_lock);
44423 + write_sequnlock(&rename_lock);
44424 + return ret;
44425 +}
44426 +
44427 +char *
44428 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44429 +{
44430 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44431 + PAGE_SIZE);
44432 +}
44433 +
44434 +char *
44435 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44436 +{
44437 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44438 + PAGE_SIZE);
44439 +}
44440 +
44441 +char *
44442 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44443 +{
44444 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44445 + PAGE_SIZE);
44446 +}
44447 +
44448 +char *
44449 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44450 +{
44451 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44452 + PAGE_SIZE);
44453 +}
44454 +
44455 +char *
44456 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
44457 +{
44458 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
44459 + PAGE_SIZE);
44460 +}
44461 +
44462 +__inline__ __u32
44463 +to_gr_audit(const __u32 reqmode)
44464 +{
44465 + /* masks off auditable permission flags, then shifts them to create
44466 + auditing flags, and adds the special case of append auditing if
44467 + we're requesting write */
44468 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
44469 +}
44470 +
44471 +struct acl_subject_label *
44472 +lookup_subject_map(const struct acl_subject_label *userp)
44473 +{
44474 + unsigned int index = shash(userp, subj_map_set.s_size);
44475 + struct subject_map *match;
44476 +
44477 + match = subj_map_set.s_hash[index];
44478 +
44479 + while (match && match->user != userp)
44480 + match = match->next;
44481 +
44482 + if (match != NULL)
44483 + return match->kernel;
44484 + else
44485 + return NULL;
44486 +}
44487 +
44488 +static void
44489 +insert_subj_map_entry(struct subject_map *subjmap)
44490 +{
44491 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
44492 + struct subject_map **curr;
44493 +
44494 + subjmap->prev = NULL;
44495 +
44496 + curr = &subj_map_set.s_hash[index];
44497 + if (*curr != NULL)
44498 + (*curr)->prev = subjmap;
44499 +
44500 + subjmap->next = *curr;
44501 + *curr = subjmap;
44502 +
44503 + return;
44504 +}
44505 +
44506 +static struct acl_role_label *
44507 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
44508 + const gid_t gid)
44509 +{
44510 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
44511 + struct acl_role_label *match;
44512 + struct role_allowed_ip *ipp;
44513 + unsigned int x;
44514 + u32 curr_ip = task->signal->curr_ip;
44515 +
44516 + task->signal->saved_ip = curr_ip;
44517 +
44518 + match = acl_role_set.r_hash[index];
44519 +
44520 + while (match) {
44521 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44522 + for (x = 0; x < match->domain_child_num; x++) {
44523 + if (match->domain_children[x] == uid)
44524 + goto found;
44525 + }
44526 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44527 + break;
44528 + match = match->next;
44529 + }
44530 +found:
44531 + if (match == NULL) {
44532 + try_group:
44533 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44534 + match = acl_role_set.r_hash[index];
44535 +
44536 + while (match) {
44537 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44538 + for (x = 0; x < match->domain_child_num; x++) {
44539 + if (match->domain_children[x] == gid)
44540 + goto found2;
44541 + }
44542 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44543 + break;
44544 + match = match->next;
44545 + }
44546 +found2:
44547 + if (match == NULL)
44548 + match = default_role;
44549 + if (match->allowed_ips == NULL)
44550 + return match;
44551 + else {
44552 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44553 + if (likely
44554 + ((ntohl(curr_ip) & ipp->netmask) ==
44555 + (ntohl(ipp->addr) & ipp->netmask)))
44556 + return match;
44557 + }
44558 + match = default_role;
44559 + }
44560 + } else if (match->allowed_ips == NULL) {
44561 + return match;
44562 + } else {
44563 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44564 + if (likely
44565 + ((ntohl(curr_ip) & ipp->netmask) ==
44566 + (ntohl(ipp->addr) & ipp->netmask)))
44567 + return match;
44568 + }
44569 + goto try_group;
44570 + }
44571 +
44572 + return match;
44573 +}
44574 +
44575 +struct acl_subject_label *
44576 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44577 + const struct acl_role_label *role)
44578 +{
44579 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44580 + struct acl_subject_label *match;
44581 +
44582 + match = role->subj_hash[index];
44583 +
44584 + while (match && (match->inode != ino || match->device != dev ||
44585 + (match->mode & GR_DELETED))) {
44586 + match = match->next;
44587 + }
44588 +
44589 + if (match && !(match->mode & GR_DELETED))
44590 + return match;
44591 + else
44592 + return NULL;
44593 +}
44594 +
44595 +struct acl_subject_label *
44596 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44597 + const struct acl_role_label *role)
44598 +{
44599 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44600 + struct acl_subject_label *match;
44601 +
44602 + match = role->subj_hash[index];
44603 +
44604 + while (match && (match->inode != ino || match->device != dev ||
44605 + !(match->mode & GR_DELETED))) {
44606 + match = match->next;
44607 + }
44608 +
44609 + if (match && (match->mode & GR_DELETED))
44610 + return match;
44611 + else
44612 + return NULL;
44613 +}
44614 +
44615 +static struct acl_object_label *
44616 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
44617 + const struct acl_subject_label *subj)
44618 +{
44619 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44620 + struct acl_object_label *match;
44621 +
44622 + match = subj->obj_hash[index];
44623 +
44624 + while (match && (match->inode != ino || match->device != dev ||
44625 + (match->mode & GR_DELETED))) {
44626 + match = match->next;
44627 + }
44628 +
44629 + if (match && !(match->mode & GR_DELETED))
44630 + return match;
44631 + else
44632 + return NULL;
44633 +}
44634 +
44635 +static struct acl_object_label *
44636 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
44637 + const struct acl_subject_label *subj)
44638 +{
44639 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44640 + struct acl_object_label *match;
44641 +
44642 + match = subj->obj_hash[index];
44643 +
44644 + while (match && (match->inode != ino || match->device != dev ||
44645 + !(match->mode & GR_DELETED))) {
44646 + match = match->next;
44647 + }
44648 +
44649 + if (match && (match->mode & GR_DELETED))
44650 + return match;
44651 +
44652 + match = subj->obj_hash[index];
44653 +
44654 + while (match && (match->inode != ino || match->device != dev ||
44655 + (match->mode & GR_DELETED))) {
44656 + match = match->next;
44657 + }
44658 +
44659 + if (match && !(match->mode & GR_DELETED))
44660 + return match;
44661 + else
44662 + return NULL;
44663 +}
44664 +
44665 +static struct name_entry *
44666 +lookup_name_entry(const char *name)
44667 +{
44668 + unsigned int len = strlen(name);
44669 + unsigned int key = full_name_hash(name, len);
44670 + unsigned int index = key % name_set.n_size;
44671 + struct name_entry *match;
44672 +
44673 + match = name_set.n_hash[index];
44674 +
44675 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
44676 + match = match->next;
44677 +
44678 + return match;
44679 +}
44680 +
44681 +static struct name_entry *
44682 +lookup_name_entry_create(const char *name)
44683 +{
44684 + unsigned int len = strlen(name);
44685 + unsigned int key = full_name_hash(name, len);
44686 + unsigned int index = key % name_set.n_size;
44687 + struct name_entry *match;
44688 +
44689 + match = name_set.n_hash[index];
44690 +
44691 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44692 + !match->deleted))
44693 + match = match->next;
44694 +
44695 + if (match && match->deleted)
44696 + return match;
44697 +
44698 + match = name_set.n_hash[index];
44699 +
44700 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44701 + match->deleted))
44702 + match = match->next;
44703 +
44704 + if (match && !match->deleted)
44705 + return match;
44706 + else
44707 + return NULL;
44708 +}
44709 +
44710 +static struct inodev_entry *
44711 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
44712 +{
44713 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
44714 + struct inodev_entry *match;
44715 +
44716 + match = inodev_set.i_hash[index];
44717 +
44718 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
44719 + match = match->next;
44720 +
44721 + return match;
44722 +}
44723 +
44724 +static void
44725 +insert_inodev_entry(struct inodev_entry *entry)
44726 +{
44727 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
44728 + inodev_set.i_size);
44729 + struct inodev_entry **curr;
44730 +
44731 + entry->prev = NULL;
44732 +
44733 + curr = &inodev_set.i_hash[index];
44734 + if (*curr != NULL)
44735 + (*curr)->prev = entry;
44736 +
44737 + entry->next = *curr;
44738 + *curr = entry;
44739 +
44740 + return;
44741 +}
44742 +
44743 +static void
44744 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
44745 +{
44746 + unsigned int index =
44747 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
44748 + struct acl_role_label **curr;
44749 + struct acl_role_label *tmp;
44750 +
44751 + curr = &acl_role_set.r_hash[index];
44752 +
44753 + /* if role was already inserted due to domains and already has
44754 + a role in the same bucket as it attached, then we need to
44755 + combine these two buckets
44756 + */
44757 + if (role->next) {
44758 + tmp = role->next;
44759 + while (tmp->next)
44760 + tmp = tmp->next;
44761 + tmp->next = *curr;
44762 + } else
44763 + role->next = *curr;
44764 + *curr = role;
44765 +
44766 + return;
44767 +}
44768 +
44769 +static void
44770 +insert_acl_role_label(struct acl_role_label *role)
44771 +{
44772 + int i;
44773 +
44774 + if (role_list == NULL) {
44775 + role_list = role;
44776 + role->prev = NULL;
44777 + } else {
44778 + role->prev = role_list;
44779 + role_list = role;
44780 + }
44781 +
44782 + /* used for hash chains */
44783 + role->next = NULL;
44784 +
44785 + if (role->roletype & GR_ROLE_DOMAIN) {
44786 + for (i = 0; i < role->domain_child_num; i++)
44787 + __insert_acl_role_label(role, role->domain_children[i]);
44788 + } else
44789 + __insert_acl_role_label(role, role->uidgid);
44790 +}
44791 +
44792 +static int
44793 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
44794 +{
44795 + struct name_entry **curr, *nentry;
44796 + struct inodev_entry *ientry;
44797 + unsigned int len = strlen(name);
44798 + unsigned int key = full_name_hash(name, len);
44799 + unsigned int index = key % name_set.n_size;
44800 +
44801 + curr = &name_set.n_hash[index];
44802 +
44803 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
44804 + curr = &((*curr)->next);
44805 +
44806 + if (*curr != NULL)
44807 + return 1;
44808 +
44809 + nentry = acl_alloc(sizeof (struct name_entry));
44810 + if (nentry == NULL)
44811 + return 0;
44812 + ientry = acl_alloc(sizeof (struct inodev_entry));
44813 + if (ientry == NULL)
44814 + return 0;
44815 + ientry->nentry = nentry;
44816 +
44817 + nentry->key = key;
44818 + nentry->name = name;
44819 + nentry->inode = inode;
44820 + nentry->device = device;
44821 + nentry->len = len;
44822 + nentry->deleted = deleted;
44823 +
44824 + nentry->prev = NULL;
44825 + curr = &name_set.n_hash[index];
44826 + if (*curr != NULL)
44827 + (*curr)->prev = nentry;
44828 + nentry->next = *curr;
44829 + *curr = nentry;
44830 +
44831 + /* insert us into the table searchable by inode/dev */
44832 + insert_inodev_entry(ientry);
44833 +
44834 + return 1;
44835 +}
44836 +
44837 +static void
44838 +insert_acl_obj_label(struct acl_object_label *obj,
44839 + struct acl_subject_label *subj)
44840 +{
44841 + unsigned int index =
44842 + fhash(obj->inode, obj->device, subj->obj_hash_size);
44843 + struct acl_object_label **curr;
44844 +
44845 +
44846 + obj->prev = NULL;
44847 +
44848 + curr = &subj->obj_hash[index];
44849 + if (*curr != NULL)
44850 + (*curr)->prev = obj;
44851 +
44852 + obj->next = *curr;
44853 + *curr = obj;
44854 +
44855 + return;
44856 +}
44857 +
44858 +static void
44859 +insert_acl_subj_label(struct acl_subject_label *obj,
44860 + struct acl_role_label *role)
44861 +{
44862 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
44863 + struct acl_subject_label **curr;
44864 +
44865 + obj->prev = NULL;
44866 +
44867 + curr = &role->subj_hash[index];
44868 + if (*curr != NULL)
44869 + (*curr)->prev = obj;
44870 +
44871 + obj->next = *curr;
44872 + *curr = obj;
44873 +
44874 + return;
44875 +}
44876 +
44877 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
44878 +
44879 +static void *
44880 +create_table(__u32 * len, int elementsize)
44881 +{
44882 + unsigned int table_sizes[] = {
44883 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
44884 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
44885 + 4194301, 8388593, 16777213, 33554393, 67108859
44886 + };
44887 + void *newtable = NULL;
44888 + unsigned int pwr = 0;
44889 +
44890 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
44891 + table_sizes[pwr] <= *len)
44892 + pwr++;
44893 +
44894 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
44895 + return newtable;
44896 +
44897 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
44898 + newtable =
44899 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
44900 + else
44901 + newtable = vmalloc(table_sizes[pwr] * elementsize);
44902 +
44903 + *len = table_sizes[pwr];
44904 +
44905 + return newtable;
44906 +}
44907 +
44908 +static int
44909 +init_variables(const struct gr_arg *arg)
44910 +{
44911 + struct task_struct *reaper = &init_task;
44912 + unsigned int stacksize;
44913 +
44914 + subj_map_set.s_size = arg->role_db.num_subjects;
44915 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
44916 + name_set.n_size = arg->role_db.num_objects;
44917 + inodev_set.i_size = arg->role_db.num_objects;
44918 +
44919 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
44920 + !name_set.n_size || !inodev_set.i_size)
44921 + return 1;
44922 +
44923 + if (!gr_init_uidset())
44924 + return 1;
44925 +
44926 + /* set up the stack that holds allocation info */
44927 +
44928 + stacksize = arg->role_db.num_pointers + 5;
44929 +
44930 + if (!acl_alloc_stack_init(stacksize))
44931 + return 1;
44932 +
44933 + /* grab reference for the real root dentry and vfsmount */
44934 + get_fs_root(reaper->fs, &real_root);
44935 +
44936 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44937 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
44938 +#endif
44939 +
44940 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
44941 + if (fakefs_obj_rw == NULL)
44942 + return 1;
44943 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
44944 +
44945 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
44946 + if (fakefs_obj_rwx == NULL)
44947 + return 1;
44948 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
44949 +
44950 + subj_map_set.s_hash =
44951 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
44952 + acl_role_set.r_hash =
44953 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
44954 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
44955 + inodev_set.i_hash =
44956 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
44957 +
44958 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
44959 + !name_set.n_hash || !inodev_set.i_hash)
44960 + return 1;
44961 +
44962 + memset(subj_map_set.s_hash, 0,
44963 + sizeof(struct subject_map *) * subj_map_set.s_size);
44964 + memset(acl_role_set.r_hash, 0,
44965 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
44966 + memset(name_set.n_hash, 0,
44967 + sizeof (struct name_entry *) * name_set.n_size);
44968 + memset(inodev_set.i_hash, 0,
44969 + sizeof (struct inodev_entry *) * inodev_set.i_size);
44970 +
44971 + return 0;
44972 +}
44973 +
44974 +/* free information not needed after startup
44975 + currently contains user->kernel pointer mappings for subjects
44976 +*/
44977 +
44978 +static void
44979 +free_init_variables(void)
44980 +{
44981 + __u32 i;
44982 +
44983 + if (subj_map_set.s_hash) {
44984 + for (i = 0; i < subj_map_set.s_size; i++) {
44985 + if (subj_map_set.s_hash[i]) {
44986 + kfree(subj_map_set.s_hash[i]);
44987 + subj_map_set.s_hash[i] = NULL;
44988 + }
44989 + }
44990 +
44991 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
44992 + PAGE_SIZE)
44993 + kfree(subj_map_set.s_hash);
44994 + else
44995 + vfree(subj_map_set.s_hash);
44996 + }
44997 +
44998 + return;
44999 +}
45000 +
45001 +static void
45002 +free_variables(void)
45003 +{
45004 + struct acl_subject_label *s;
45005 + struct acl_role_label *r;
45006 + struct task_struct *task, *task2;
45007 + unsigned int x;
45008 +
45009 + gr_clear_learn_entries();
45010 +
45011 + read_lock(&tasklist_lock);
45012 + do_each_thread(task2, task) {
45013 + task->acl_sp_role = 0;
45014 + task->acl_role_id = 0;
45015 + task->acl = NULL;
45016 + task->role = NULL;
45017 + } while_each_thread(task2, task);
45018 + read_unlock(&tasklist_lock);
45019 +
45020 + /* release the reference to the real root dentry and vfsmount */
45021 + path_put(&real_root);
45022 +
45023 + /* free all object hash tables */
45024 +
45025 + FOR_EACH_ROLE_START(r)
45026 + if (r->subj_hash == NULL)
45027 + goto next_role;
45028 + FOR_EACH_SUBJECT_START(r, s, x)
45029 + if (s->obj_hash == NULL)
45030 + break;
45031 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45032 + kfree(s->obj_hash);
45033 + else
45034 + vfree(s->obj_hash);
45035 + FOR_EACH_SUBJECT_END(s, x)
45036 + FOR_EACH_NESTED_SUBJECT_START(r, s)
45037 + if (s->obj_hash == NULL)
45038 + break;
45039 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45040 + kfree(s->obj_hash);
45041 + else
45042 + vfree(s->obj_hash);
45043 + FOR_EACH_NESTED_SUBJECT_END(s)
45044 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
45045 + kfree(r->subj_hash);
45046 + else
45047 + vfree(r->subj_hash);
45048 + r->subj_hash = NULL;
45049 +next_role:
45050 + FOR_EACH_ROLE_END(r)
45051 +
45052 + acl_free_all();
45053 +
45054 + if (acl_role_set.r_hash) {
45055 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
45056 + PAGE_SIZE)
45057 + kfree(acl_role_set.r_hash);
45058 + else
45059 + vfree(acl_role_set.r_hash);
45060 + }
45061 + if (name_set.n_hash) {
45062 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
45063 + PAGE_SIZE)
45064 + kfree(name_set.n_hash);
45065 + else
45066 + vfree(name_set.n_hash);
45067 + }
45068 +
45069 + if (inodev_set.i_hash) {
45070 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
45071 + PAGE_SIZE)
45072 + kfree(inodev_set.i_hash);
45073 + else
45074 + vfree(inodev_set.i_hash);
45075 + }
45076 +
45077 + gr_free_uidset();
45078 +
45079 + memset(&name_set, 0, sizeof (struct name_db));
45080 + memset(&inodev_set, 0, sizeof (struct inodev_db));
45081 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
45082 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
45083 +
45084 + default_role = NULL;
45085 + role_list = NULL;
45086 +
45087 + return;
45088 +}
45089 +
45090 +static __u32
45091 +count_user_objs(struct acl_object_label *userp)
45092 +{
45093 + struct acl_object_label o_tmp;
45094 + __u32 num = 0;
45095 +
45096 + while (userp) {
45097 + if (copy_from_user(&o_tmp, userp,
45098 + sizeof (struct acl_object_label)))
45099 + break;
45100 +
45101 + userp = o_tmp.prev;
45102 + num++;
45103 + }
45104 +
45105 + return num;
45106 +}
45107 +
45108 +static struct acl_subject_label *
45109 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
45110 +
45111 +static int
45112 +copy_user_glob(struct acl_object_label *obj)
45113 +{
45114 + struct acl_object_label *g_tmp, **guser;
45115 + unsigned int len;
45116 + char *tmp;
45117 +
45118 + if (obj->globbed == NULL)
45119 + return 0;
45120 +
45121 + guser = &obj->globbed;
45122 + while (*guser) {
45123 + g_tmp = (struct acl_object_label *)
45124 + acl_alloc(sizeof (struct acl_object_label));
45125 + if (g_tmp == NULL)
45126 + return -ENOMEM;
45127 +
45128 + if (copy_from_user(g_tmp, *guser,
45129 + sizeof (struct acl_object_label)))
45130 + return -EFAULT;
45131 +
45132 + len = strnlen_user(g_tmp->filename, PATH_MAX);
45133 +
45134 + if (!len || len >= PATH_MAX)
45135 + return -EINVAL;
45136 +
45137 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45138 + return -ENOMEM;
45139 +
45140 + if (copy_from_user(tmp, g_tmp->filename, len))
45141 + return -EFAULT;
45142 + tmp[len-1] = '\0';
45143 + g_tmp->filename = tmp;
45144 +
45145 + *guser = g_tmp;
45146 + guser = &(g_tmp->next);
45147 + }
45148 +
45149 + return 0;
45150 +}
45151 +
45152 +static int
45153 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
45154 + struct acl_role_label *role)
45155 +{
45156 + struct acl_object_label *o_tmp;
45157 + unsigned int len;
45158 + int ret;
45159 + char *tmp;
45160 +
45161 + while (userp) {
45162 + if ((o_tmp = (struct acl_object_label *)
45163 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
45164 + return -ENOMEM;
45165 +
45166 + if (copy_from_user(o_tmp, userp,
45167 + sizeof (struct acl_object_label)))
45168 + return -EFAULT;
45169 +
45170 + userp = o_tmp->prev;
45171 +
45172 + len = strnlen_user(o_tmp->filename, PATH_MAX);
45173 +
45174 + if (!len || len >= PATH_MAX)
45175 + return -EINVAL;
45176 +
45177 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45178 + return -ENOMEM;
45179 +
45180 + if (copy_from_user(tmp, o_tmp->filename, len))
45181 + return -EFAULT;
45182 + tmp[len-1] = '\0';
45183 + o_tmp->filename = tmp;
45184 +
45185 + insert_acl_obj_label(o_tmp, subj);
45186 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
45187 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
45188 + return -ENOMEM;
45189 +
45190 + ret = copy_user_glob(o_tmp);
45191 + if (ret)
45192 + return ret;
45193 +
45194 + if (o_tmp->nested) {
45195 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
45196 + if (IS_ERR(o_tmp->nested))
45197 + return PTR_ERR(o_tmp->nested);
45198 +
45199 + /* insert into nested subject list */
45200 + o_tmp->nested->next = role->hash->first;
45201 + role->hash->first = o_tmp->nested;
45202 + }
45203 + }
45204 +
45205 + return 0;
45206 +}
45207 +
45208 +static __u32
45209 +count_user_subjs(struct acl_subject_label *userp)
45210 +{
45211 + struct acl_subject_label s_tmp;
45212 + __u32 num = 0;
45213 +
45214 + while (userp) {
45215 + if (copy_from_user(&s_tmp, userp,
45216 + sizeof (struct acl_subject_label)))
45217 + break;
45218 +
45219 + userp = s_tmp.prev;
45220 + /* do not count nested subjects against this count, since
45221 + they are not included in the hash table, but are
45222 + attached to objects. We have already counted
45223 + the subjects in userspace for the allocation
45224 + stack
45225 + */
45226 + if (!(s_tmp.mode & GR_NESTED))
45227 + num++;
45228 + }
45229 +
45230 + return num;
45231 +}
45232 +
45233 +static int
45234 +copy_user_allowedips(struct acl_role_label *rolep)
45235 +{
45236 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
45237 +
45238 + ruserip = rolep->allowed_ips;
45239 +
45240 + while (ruserip) {
45241 + rlast = rtmp;
45242 +
45243 + if ((rtmp = (struct role_allowed_ip *)
45244 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
45245 + return -ENOMEM;
45246 +
45247 + if (copy_from_user(rtmp, ruserip,
45248 + sizeof (struct role_allowed_ip)))
45249 + return -EFAULT;
45250 +
45251 + ruserip = rtmp->prev;
45252 +
45253 + if (!rlast) {
45254 + rtmp->prev = NULL;
45255 + rolep->allowed_ips = rtmp;
45256 + } else {
45257 + rlast->next = rtmp;
45258 + rtmp->prev = rlast;
45259 + }
45260 +
45261 + if (!ruserip)
45262 + rtmp->next = NULL;
45263 + }
45264 +
45265 + return 0;
45266 +}
45267 +
45268 +static int
45269 +copy_user_transitions(struct acl_role_label *rolep)
45270 +{
45271 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
45272 +
45273 + unsigned int len;
45274 + char *tmp;
45275 +
45276 + rusertp = rolep->transitions;
45277 +
45278 + while (rusertp) {
45279 + rlast = rtmp;
45280 +
45281 + if ((rtmp = (struct role_transition *)
45282 + acl_alloc(sizeof (struct role_transition))) == NULL)
45283 + return -ENOMEM;
45284 +
45285 + if (copy_from_user(rtmp, rusertp,
45286 + sizeof (struct role_transition)))
45287 + return -EFAULT;
45288 +
45289 + rusertp = rtmp->prev;
45290 +
45291 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
45292 +
45293 + if (!len || len >= GR_SPROLE_LEN)
45294 + return -EINVAL;
45295 +
45296 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45297 + return -ENOMEM;
45298 +
45299 + if (copy_from_user(tmp, rtmp->rolename, len))
45300 + return -EFAULT;
45301 + tmp[len-1] = '\0';
45302 + rtmp->rolename = tmp;
45303 +
45304 + if (!rlast) {
45305 + rtmp->prev = NULL;
45306 + rolep->transitions = rtmp;
45307 + } else {
45308 + rlast->next = rtmp;
45309 + rtmp->prev = rlast;
45310 + }
45311 +
45312 + if (!rusertp)
45313 + rtmp->next = NULL;
45314 + }
45315 +
45316 + return 0;
45317 +}
45318 +
45319 +static struct acl_subject_label *
45320 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
45321 +{
45322 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
45323 + unsigned int len;
45324 + char *tmp;
45325 + __u32 num_objs;
45326 + struct acl_ip_label **i_tmp, *i_utmp2;
45327 + struct gr_hash_struct ghash;
45328 + struct subject_map *subjmap;
45329 + unsigned int i_num;
45330 + int err;
45331 +
45332 + s_tmp = lookup_subject_map(userp);
45333 +
45334 + /* we've already copied this subject into the kernel, just return
45335 + the reference to it, and don't copy it over again
45336 + */
45337 + if (s_tmp)
45338 + return(s_tmp);
45339 +
45340 + if ((s_tmp = (struct acl_subject_label *)
45341 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
45342 + return ERR_PTR(-ENOMEM);
45343 +
45344 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
45345 + if (subjmap == NULL)
45346 + return ERR_PTR(-ENOMEM);
45347 +
45348 + subjmap->user = userp;
45349 + subjmap->kernel = s_tmp;
45350 + insert_subj_map_entry(subjmap);
45351 +
45352 + if (copy_from_user(s_tmp, userp,
45353 + sizeof (struct acl_subject_label)))
45354 + return ERR_PTR(-EFAULT);
45355 +
45356 + len = strnlen_user(s_tmp->filename, PATH_MAX);
45357 +
45358 + if (!len || len >= PATH_MAX)
45359 + return ERR_PTR(-EINVAL);
45360 +
45361 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45362 + return ERR_PTR(-ENOMEM);
45363 +
45364 + if (copy_from_user(tmp, s_tmp->filename, len))
45365 + return ERR_PTR(-EFAULT);
45366 + tmp[len-1] = '\0';
45367 + s_tmp->filename = tmp;
45368 +
45369 + if (!strcmp(s_tmp->filename, "/"))
45370 + role->root_label = s_tmp;
45371 +
45372 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
45373 + return ERR_PTR(-EFAULT);
45374 +
45375 + /* copy user and group transition tables */
45376 +
45377 + if (s_tmp->user_trans_num) {
45378 + uid_t *uidlist;
45379 +
45380 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
45381 + if (uidlist == NULL)
45382 + return ERR_PTR(-ENOMEM);
45383 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
45384 + return ERR_PTR(-EFAULT);
45385 +
45386 + s_tmp->user_transitions = uidlist;
45387 + }
45388 +
45389 + if (s_tmp->group_trans_num) {
45390 + gid_t *gidlist;
45391 +
45392 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
45393 + if (gidlist == NULL)
45394 + return ERR_PTR(-ENOMEM);
45395 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
45396 + return ERR_PTR(-EFAULT);
45397 +
45398 + s_tmp->group_transitions = gidlist;
45399 + }
45400 +
45401 + /* set up object hash table */
45402 + num_objs = count_user_objs(ghash.first);
45403 +
45404 + s_tmp->obj_hash_size = num_objs;
45405 + s_tmp->obj_hash =
45406 + (struct acl_object_label **)
45407 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45408 +
45409 + if (!s_tmp->obj_hash)
45410 + return ERR_PTR(-ENOMEM);
45411 +
45412 + memset(s_tmp->obj_hash, 0,
45413 + s_tmp->obj_hash_size *
45414 + sizeof (struct acl_object_label *));
45415 +
45416 + /* add in objects */
45417 + err = copy_user_objs(ghash.first, s_tmp, role);
45418 +
45419 + if (err)
45420 + return ERR_PTR(err);
45421 +
45422 + /* set pointer for parent subject */
45423 + if (s_tmp->parent_subject) {
45424 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45425 +
45426 + if (IS_ERR(s_tmp2))
45427 + return s_tmp2;
45428 +
45429 + s_tmp->parent_subject = s_tmp2;
45430 + }
45431 +
45432 + /* add in ip acls */
45433 +
45434 + if (!s_tmp->ip_num) {
45435 + s_tmp->ips = NULL;
45436 + goto insert;
45437 + }
45438 +
45439 + i_tmp =
45440 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45441 + sizeof (struct acl_ip_label *));
45442 +
45443 + if (!i_tmp)
45444 + return ERR_PTR(-ENOMEM);
45445 +
45446 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45447 + *(i_tmp + i_num) =
45448 + (struct acl_ip_label *)
45449 + acl_alloc(sizeof (struct acl_ip_label));
45450 + if (!*(i_tmp + i_num))
45451 + return ERR_PTR(-ENOMEM);
45452 +
45453 + if (copy_from_user
45454 + (&i_utmp2, s_tmp->ips + i_num,
45455 + sizeof (struct acl_ip_label *)))
45456 + return ERR_PTR(-EFAULT);
45457 +
45458 + if (copy_from_user
45459 + (*(i_tmp + i_num), i_utmp2,
45460 + sizeof (struct acl_ip_label)))
45461 + return ERR_PTR(-EFAULT);
45462 +
45463 + if ((*(i_tmp + i_num))->iface == NULL)
45464 + continue;
45465 +
45466 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
45467 + if (!len || len >= IFNAMSIZ)
45468 + return ERR_PTR(-EINVAL);
45469 + tmp = acl_alloc(len);
45470 + if (tmp == NULL)
45471 + return ERR_PTR(-ENOMEM);
45472 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
45473 + return ERR_PTR(-EFAULT);
45474 + (*(i_tmp + i_num))->iface = tmp;
45475 + }
45476 +
45477 + s_tmp->ips = i_tmp;
45478 +
45479 +insert:
45480 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
45481 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
45482 + return ERR_PTR(-ENOMEM);
45483 +
45484 + return s_tmp;
45485 +}
45486 +
45487 +static int
45488 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
45489 +{
45490 + struct acl_subject_label s_pre;
45491 + struct acl_subject_label * ret;
45492 + int err;
45493 +
45494 + while (userp) {
45495 + if (copy_from_user(&s_pre, userp,
45496 + sizeof (struct acl_subject_label)))
45497 + return -EFAULT;
45498 +
45499 + /* do not add nested subjects here, add
45500 + while parsing objects
45501 + */
45502 +
45503 + if (s_pre.mode & GR_NESTED) {
45504 + userp = s_pre.prev;
45505 + continue;
45506 + }
45507 +
45508 + ret = do_copy_user_subj(userp, role);
45509 +
45510 + err = PTR_ERR(ret);
45511 + if (IS_ERR(ret))
45512 + return err;
45513 +
45514 + insert_acl_subj_label(ret, role);
45515 +
45516 + userp = s_pre.prev;
45517 + }
45518 +
45519 + return 0;
45520 +}
45521 +
45522 +static int
45523 +copy_user_acl(struct gr_arg *arg)
45524 +{
45525 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45526 + struct sprole_pw *sptmp;
45527 + struct gr_hash_struct *ghash;
45528 + uid_t *domainlist;
45529 + unsigned int r_num;
45530 + unsigned int len;
45531 + char *tmp;
45532 + int err = 0;
45533 + __u16 i;
45534 + __u32 num_subjs;
45535 +
45536 + /* we need a default and kernel role */
45537 + if (arg->role_db.num_roles < 2)
45538 + return -EINVAL;
45539 +
45540 + /* copy special role authentication info from userspace */
45541 +
45542 + num_sprole_pws = arg->num_sprole_pws;
45543 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45544 +
45545 + if (!acl_special_roles) {
45546 + err = -ENOMEM;
45547 + goto cleanup;
45548 + }
45549 +
45550 + for (i = 0; i < num_sprole_pws; i++) {
45551 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45552 + if (!sptmp) {
45553 + err = -ENOMEM;
45554 + goto cleanup;
45555 + }
45556 + if (copy_from_user(sptmp, arg->sprole_pws + i,
45557 + sizeof (struct sprole_pw))) {
45558 + err = -EFAULT;
45559 + goto cleanup;
45560 + }
45561 +
45562 + len =
45563 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
45564 +
45565 + if (!len || len >= GR_SPROLE_LEN) {
45566 + err = -EINVAL;
45567 + goto cleanup;
45568 + }
45569 +
45570 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45571 + err = -ENOMEM;
45572 + goto cleanup;
45573 + }
45574 +
45575 + if (copy_from_user(tmp, sptmp->rolename, len)) {
45576 + err = -EFAULT;
45577 + goto cleanup;
45578 + }
45579 + tmp[len-1] = '\0';
45580 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45581 + printk(KERN_ALERT "Copying special role %s\n", tmp);
45582 +#endif
45583 + sptmp->rolename = tmp;
45584 + acl_special_roles[i] = sptmp;
45585 + }
45586 +
45587 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45588 +
45589 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45590 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
45591 +
45592 + if (!r_tmp) {
45593 + err = -ENOMEM;
45594 + goto cleanup;
45595 + }
45596 +
45597 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
45598 + sizeof (struct acl_role_label *))) {
45599 + err = -EFAULT;
45600 + goto cleanup;
45601 + }
45602 +
45603 + if (copy_from_user(r_tmp, r_utmp2,
45604 + sizeof (struct acl_role_label))) {
45605 + err = -EFAULT;
45606 + goto cleanup;
45607 + }
45608 +
45609 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
45610 +
45611 + if (!len || len >= PATH_MAX) {
45612 + err = -EINVAL;
45613 + goto cleanup;
45614 + }
45615 +
45616 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45617 + err = -ENOMEM;
45618 + goto cleanup;
45619 + }
45620 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
45621 + err = -EFAULT;
45622 + goto cleanup;
45623 + }
45624 + tmp[len-1] = '\0';
45625 + r_tmp->rolename = tmp;
45626 +
45627 + if (!strcmp(r_tmp->rolename, "default")
45628 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
45629 + default_role = r_tmp;
45630 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
45631 + kernel_role = r_tmp;
45632 + }
45633 +
45634 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
45635 + err = -ENOMEM;
45636 + goto cleanup;
45637 + }
45638 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
45639 + err = -EFAULT;
45640 + goto cleanup;
45641 + }
45642 +
45643 + r_tmp->hash = ghash;
45644 +
45645 + num_subjs = count_user_subjs(r_tmp->hash->first);
45646 +
45647 + r_tmp->subj_hash_size = num_subjs;
45648 + r_tmp->subj_hash =
45649 + (struct acl_subject_label **)
45650 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
45651 +
45652 + if (!r_tmp->subj_hash) {
45653 + err = -ENOMEM;
45654 + goto cleanup;
45655 + }
45656 +
45657 + err = copy_user_allowedips(r_tmp);
45658 + if (err)
45659 + goto cleanup;
45660 +
45661 + /* copy domain info */
45662 + if (r_tmp->domain_children != NULL) {
45663 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
45664 + if (domainlist == NULL) {
45665 + err = -ENOMEM;
45666 + goto cleanup;
45667 + }
45668 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
45669 + err = -EFAULT;
45670 + goto cleanup;
45671 + }
45672 + r_tmp->domain_children = domainlist;
45673 + }
45674 +
45675 + err = copy_user_transitions(r_tmp);
45676 + if (err)
45677 + goto cleanup;
45678 +
45679 + memset(r_tmp->subj_hash, 0,
45680 + r_tmp->subj_hash_size *
45681 + sizeof (struct acl_subject_label *));
45682 +
45683 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
45684 +
45685 + if (err)
45686 + goto cleanup;
45687 +
45688 + /* set nested subject list to null */
45689 + r_tmp->hash->first = NULL;
45690 +
45691 + insert_acl_role_label(r_tmp);
45692 + }
45693 +
45694 + goto return_err;
45695 + cleanup:
45696 + free_variables();
45697 + return_err:
45698 + return err;
45699 +
45700 +}
45701 +
45702 +static int
45703 +gracl_init(struct gr_arg *args)
45704 +{
45705 + int error = 0;
45706 +
45707 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
45708 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
45709 +
45710 + if (init_variables(args)) {
45711 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
45712 + error = -ENOMEM;
45713 + free_variables();
45714 + goto out;
45715 + }
45716 +
45717 + error = copy_user_acl(args);
45718 + free_init_variables();
45719 + if (error) {
45720 + free_variables();
45721 + goto out;
45722 + }
45723 +
45724 + if ((error = gr_set_acls(0))) {
45725 + free_variables();
45726 + goto out;
45727 + }
45728 +
45729 + pax_open_kernel();
45730 + gr_status |= GR_READY;
45731 + pax_close_kernel();
45732 +
45733 + out:
45734 + return error;
45735 +}
45736 +
45737 +/* derived from glibc fnmatch() 0: match, 1: no match*/
45738 +
45739 +static int
45740 +glob_match(const char *p, const char *n)
45741 +{
45742 + char c;
45743 +
45744 + while ((c = *p++) != '\0') {
45745 + switch (c) {
45746 + case '?':
45747 + if (*n == '\0')
45748 + return 1;
45749 + else if (*n == '/')
45750 + return 1;
45751 + break;
45752 + case '\\':
45753 + if (*n != c)
45754 + return 1;
45755 + break;
45756 + case '*':
45757 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
45758 + if (*n == '/')
45759 + return 1;
45760 + else if (c == '?') {
45761 + if (*n == '\0')
45762 + return 1;
45763 + else
45764 + ++n;
45765 + }
45766 + }
45767 + if (c == '\0') {
45768 + return 0;
45769 + } else {
45770 + const char *endp;
45771 +
45772 + if ((endp = strchr(n, '/')) == NULL)
45773 + endp = n + strlen(n);
45774 +
45775 + if (c == '[') {
45776 + for (--p; n < endp; ++n)
45777 + if (!glob_match(p, n))
45778 + return 0;
45779 + } else if (c == '/') {
45780 + while (*n != '\0' && *n != '/')
45781 + ++n;
45782 + if (*n == '/' && !glob_match(p, n + 1))
45783 + return 0;
45784 + } else {
45785 + for (--p; n < endp; ++n)
45786 + if (*n == c && !glob_match(p, n))
45787 + return 0;
45788 + }
45789 +
45790 + return 1;
45791 + }
45792 + case '[':
45793 + {
45794 + int not;
45795 + char cold;
45796 +
45797 + if (*n == '\0' || *n == '/')
45798 + return 1;
45799 +
45800 + not = (*p == '!' || *p == '^');
45801 + if (not)
45802 + ++p;
45803 +
45804 + c = *p++;
45805 + for (;;) {
45806 + unsigned char fn = (unsigned char)*n;
45807 +
45808 + if (c == '\0')
45809 + return 1;
45810 + else {
45811 + if (c == fn)
45812 + goto matched;
45813 + cold = c;
45814 + c = *p++;
45815 +
45816 + if (c == '-' && *p != ']') {
45817 + unsigned char cend = *p++;
45818 +
45819 + if (cend == '\0')
45820 + return 1;
45821 +
45822 + if (cold <= fn && fn <= cend)
45823 + goto matched;
45824 +
45825 + c = *p++;
45826 + }
45827 + }
45828 +
45829 + if (c == ']')
45830 + break;
45831 + }
45832 + if (!not)
45833 + return 1;
45834 + break;
45835 + matched:
45836 + while (c != ']') {
45837 + if (c == '\0')
45838 + return 1;
45839 +
45840 + c = *p++;
45841 + }
45842 + if (not)
45843 + return 1;
45844 + }
45845 + break;
45846 + default:
45847 + if (c != *n)
45848 + return 1;
45849 + }
45850 +
45851 + ++n;
45852 + }
45853 +
45854 + if (*n == '\0')
45855 + return 0;
45856 +
45857 + if (*n == '/')
45858 + return 0;
45859 +
45860 + return 1;
45861 +}
45862 +
45863 +static struct acl_object_label *
45864 +chk_glob_label(struct acl_object_label *globbed,
45865 + struct dentry *dentry, struct vfsmount *mnt, char **path)
45866 +{
45867 + struct acl_object_label *tmp;
45868 +
45869 + if (*path == NULL)
45870 + *path = gr_to_filename_nolock(dentry, mnt);
45871 +
45872 + tmp = globbed;
45873 +
45874 + while (tmp) {
45875 + if (!glob_match(tmp->filename, *path))
45876 + return tmp;
45877 + tmp = tmp->next;
45878 + }
45879 +
45880 + return NULL;
45881 +}
45882 +
45883 +static struct acl_object_label *
45884 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45885 + const ino_t curr_ino, const dev_t curr_dev,
45886 + const struct acl_subject_label *subj, char **path, const int checkglob)
45887 +{
45888 + struct acl_subject_label *tmpsubj;
45889 + struct acl_object_label *retval;
45890 + struct acl_object_label *retval2;
45891 +
45892 + tmpsubj = (struct acl_subject_label *) subj;
45893 + read_lock(&gr_inode_lock);
45894 + do {
45895 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
45896 + if (retval) {
45897 + if (checkglob && retval->globbed) {
45898 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
45899 + (struct vfsmount *)orig_mnt, path);
45900 + if (retval2)
45901 + retval = retval2;
45902 + }
45903 + break;
45904 + }
45905 + } while ((tmpsubj = tmpsubj->parent_subject));
45906 + read_unlock(&gr_inode_lock);
45907 +
45908 + return retval;
45909 +}
45910 +
45911 +static __inline__ struct acl_object_label *
45912 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45913 + struct dentry *curr_dentry,
45914 + const struct acl_subject_label *subj, char **path, const int checkglob)
45915 +{
45916 + int newglob = checkglob;
45917 + ino_t inode;
45918 + dev_t device;
45919 +
45920 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
45921 + as we don't want a / * rule to match instead of the / object
45922 + don't do this for create lookups that call this function though, since they're looking up
45923 + on the parent and thus need globbing checks on all paths
45924 + */
45925 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
45926 + newglob = GR_NO_GLOB;
45927 +
45928 + spin_lock(&curr_dentry->d_lock);
45929 + inode = curr_dentry->d_inode->i_ino;
45930 + device = __get_dev(curr_dentry);
45931 + spin_unlock(&curr_dentry->d_lock);
45932 +
45933 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
45934 +}
45935 +
45936 +static struct acl_object_label *
45937 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45938 + const struct acl_subject_label *subj, char *path, const int checkglob)
45939 +{
45940 + struct dentry *dentry = (struct dentry *) l_dentry;
45941 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45942 + struct acl_object_label *retval;
45943 + struct dentry *parent;
45944 +
45945 + write_seqlock(&rename_lock);
45946 + br_read_lock(vfsmount_lock);
45947 +
45948 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
45949 +#ifdef CONFIG_NET
45950 + mnt == sock_mnt ||
45951 +#endif
45952 +#ifdef CONFIG_HUGETLBFS
45953 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
45954 +#endif
45955 + /* ignore Eric Biederman */
45956 + IS_PRIVATE(l_dentry->d_inode))) {
45957 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
45958 + goto out;
45959 + }
45960 +
45961 + for (;;) {
45962 + if (dentry == real_root.dentry && mnt == real_root.mnt)
45963 + break;
45964 +
45965 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45966 + if (mnt->mnt_parent == mnt)
45967 + break;
45968 +
45969 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45970 + if (retval != NULL)
45971 + goto out;
45972 +
45973 + dentry = mnt->mnt_mountpoint;
45974 + mnt = mnt->mnt_parent;
45975 + continue;
45976 + }
45977 +
45978 + parent = dentry->d_parent;
45979 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45980 + if (retval != NULL)
45981 + goto out;
45982 +
45983 + dentry = parent;
45984 + }
45985 +
45986 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45987 +
45988 + /* real_root is pinned so we don't have to hold a reference */
45989 + if (retval == NULL)
45990 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
45991 +out:
45992 + br_read_unlock(vfsmount_lock);
45993 + write_sequnlock(&rename_lock);
45994 +
45995 + BUG_ON(retval == NULL);
45996 +
45997 + return retval;
45998 +}
45999 +
46000 +static __inline__ struct acl_object_label *
46001 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46002 + const struct acl_subject_label *subj)
46003 +{
46004 + char *path = NULL;
46005 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
46006 +}
46007 +
46008 +static __inline__ struct acl_object_label *
46009 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46010 + const struct acl_subject_label *subj)
46011 +{
46012 + char *path = NULL;
46013 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
46014 +}
46015 +
46016 +static __inline__ struct acl_object_label *
46017 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46018 + const struct acl_subject_label *subj, char *path)
46019 +{
46020 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
46021 +}
46022 +
46023 +static struct acl_subject_label *
46024 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46025 + const struct acl_role_label *role)
46026 +{
46027 + struct dentry *dentry = (struct dentry *) l_dentry;
46028 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46029 + struct acl_subject_label *retval;
46030 + struct dentry *parent;
46031 +
46032 + write_seqlock(&rename_lock);
46033 + br_read_lock(vfsmount_lock);
46034 +
46035 + for (;;) {
46036 + if (dentry == real_root.dentry && mnt == real_root.mnt)
46037 + break;
46038 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46039 + if (mnt->mnt_parent == mnt)
46040 + break;
46041 +
46042 + spin_lock(&dentry->d_lock);
46043 + read_lock(&gr_inode_lock);
46044 + retval =
46045 + lookup_acl_subj_label(dentry->d_inode->i_ino,
46046 + __get_dev(dentry), role);
46047 + read_unlock(&gr_inode_lock);
46048 + spin_unlock(&dentry->d_lock);
46049 + if (retval != NULL)
46050 + goto out;
46051 +
46052 + dentry = mnt->mnt_mountpoint;
46053 + mnt = mnt->mnt_parent;
46054 + continue;
46055 + }
46056 +
46057 + spin_lock(&dentry->d_lock);
46058 + read_lock(&gr_inode_lock);
46059 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46060 + __get_dev(dentry), role);
46061 + read_unlock(&gr_inode_lock);
46062 + parent = dentry->d_parent;
46063 + spin_unlock(&dentry->d_lock);
46064 +
46065 + if (retval != NULL)
46066 + goto out;
46067 +
46068 + dentry = parent;
46069 + }
46070 +
46071 + spin_lock(&dentry->d_lock);
46072 + read_lock(&gr_inode_lock);
46073 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46074 + __get_dev(dentry), role);
46075 + read_unlock(&gr_inode_lock);
46076 + spin_unlock(&dentry->d_lock);
46077 +
46078 + if (unlikely(retval == NULL)) {
46079 + /* real_root is pinned, we don't need to hold a reference */
46080 + read_lock(&gr_inode_lock);
46081 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
46082 + __get_dev(real_root.dentry), role);
46083 + read_unlock(&gr_inode_lock);
46084 + }
46085 +out:
46086 + br_read_unlock(vfsmount_lock);
46087 + write_sequnlock(&rename_lock);
46088 +
46089 + BUG_ON(retval == NULL);
46090 +
46091 + return retval;
46092 +}
46093 +
46094 +static void
46095 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
46096 +{
46097 + struct task_struct *task = current;
46098 + const struct cred *cred = current_cred();
46099 +
46100 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46101 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46102 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46103 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
46104 +
46105 + return;
46106 +}
46107 +
46108 +static void
46109 +gr_log_learn_sysctl(const char *path, const __u32 mode)
46110 +{
46111 + struct task_struct *task = current;
46112 + const struct cred *cred = current_cred();
46113 +
46114 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46115 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46116 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46117 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
46118 +
46119 + return;
46120 +}
46121 +
46122 +static void
46123 +gr_log_learn_id_change(const char type, const unsigned int real,
46124 + const unsigned int effective, const unsigned int fs)
46125 +{
46126 + struct task_struct *task = current;
46127 + const struct cred *cred = current_cred();
46128 +
46129 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
46130 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46131 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46132 + type, real, effective, fs, &task->signal->saved_ip);
46133 +
46134 + return;
46135 +}
46136 +
46137 +__u32
46138 +gr_check_link(const struct dentry * new_dentry,
46139 + const struct dentry * parent_dentry,
46140 + const struct vfsmount * parent_mnt,
46141 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
46142 +{
46143 + struct acl_object_label *obj;
46144 + __u32 oldmode, newmode;
46145 + __u32 needmode;
46146 +
46147 + if (unlikely(!(gr_status & GR_READY)))
46148 + return (GR_CREATE | GR_LINK);
46149 +
46150 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
46151 + oldmode = obj->mode;
46152 +
46153 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46154 + oldmode |= (GR_CREATE | GR_LINK);
46155 +
46156 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
46157 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46158 + needmode |= GR_SETID | GR_AUDIT_SETID;
46159 +
46160 + newmode =
46161 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
46162 + oldmode | needmode);
46163 +
46164 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
46165 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
46166 + GR_INHERIT | GR_AUDIT_INHERIT);
46167 +
46168 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
46169 + goto bad;
46170 +
46171 + if ((oldmode & needmode) != needmode)
46172 + goto bad;
46173 +
46174 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
46175 + if ((newmode & needmode) != needmode)
46176 + goto bad;
46177 +
46178 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
46179 + return newmode;
46180 +bad:
46181 + needmode = oldmode;
46182 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46183 + needmode |= GR_SETID;
46184 +
46185 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46186 + gr_log_learn(old_dentry, old_mnt, needmode);
46187 + return (GR_CREATE | GR_LINK);
46188 + } else if (newmode & GR_SUPPRESS)
46189 + return GR_SUPPRESS;
46190 + else
46191 + return 0;
46192 +}
46193 +
46194 +__u32
46195 +gr_search_file(const struct dentry * dentry, const __u32 mode,
46196 + const struct vfsmount * mnt)
46197 +{
46198 + __u32 retval = mode;
46199 + struct acl_subject_label *curracl;
46200 + struct acl_object_label *currobj;
46201 +
46202 + if (unlikely(!(gr_status & GR_READY)))
46203 + return (mode & ~GR_AUDITS);
46204 +
46205 + curracl = current->acl;
46206 +
46207 + currobj = chk_obj_label(dentry, mnt, curracl);
46208 + retval = currobj->mode & mode;
46209 +
46210 + /* if we're opening a specified transfer file for writing
46211 + (e.g. /dev/initctl), then transfer our role to init
46212 + */
46213 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
46214 + current->role->roletype & GR_ROLE_PERSIST)) {
46215 + struct task_struct *task = init_pid_ns.child_reaper;
46216 +
46217 + if (task->role != current->role) {
46218 + task->acl_sp_role = 0;
46219 + task->acl_role_id = current->acl_role_id;
46220 + task->role = current->role;
46221 + rcu_read_lock();
46222 + read_lock(&grsec_exec_file_lock);
46223 + gr_apply_subject_to_task(task);
46224 + read_unlock(&grsec_exec_file_lock);
46225 + rcu_read_unlock();
46226 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
46227 + }
46228 + }
46229 +
46230 + if (unlikely
46231 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
46232 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
46233 + __u32 new_mode = mode;
46234 +
46235 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46236 +
46237 + retval = new_mode;
46238 +
46239 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
46240 + new_mode |= GR_INHERIT;
46241 +
46242 + if (!(mode & GR_NOLEARN))
46243 + gr_log_learn(dentry, mnt, new_mode);
46244 + }
46245 +
46246 + return retval;
46247 +}
46248 +
46249 +__u32
46250 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
46251 + const struct vfsmount * mnt, const __u32 mode)
46252 +{
46253 + struct name_entry *match;
46254 + struct acl_object_label *matchpo;
46255 + struct acl_subject_label *curracl;
46256 + char *path;
46257 + __u32 retval;
46258 +
46259 + if (unlikely(!(gr_status & GR_READY)))
46260 + return (mode & ~GR_AUDITS);
46261 +
46262 + preempt_disable();
46263 + path = gr_to_filename_rbac(new_dentry, mnt);
46264 + match = lookup_name_entry_create(path);
46265 +
46266 + if (!match)
46267 + goto check_parent;
46268 +
46269 + curracl = current->acl;
46270 +
46271 + read_lock(&gr_inode_lock);
46272 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
46273 + read_unlock(&gr_inode_lock);
46274 +
46275 + if (matchpo) {
46276 + if ((matchpo->mode & mode) !=
46277 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
46278 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46279 + __u32 new_mode = mode;
46280 +
46281 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46282 +
46283 + gr_log_learn(new_dentry, mnt, new_mode);
46284 +
46285 + preempt_enable();
46286 + return new_mode;
46287 + }
46288 + preempt_enable();
46289 + return (matchpo->mode & mode);
46290 + }
46291 +
46292 + check_parent:
46293 + curracl = current->acl;
46294 +
46295 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
46296 + retval = matchpo->mode & mode;
46297 +
46298 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
46299 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
46300 + __u32 new_mode = mode;
46301 +
46302 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46303 +
46304 + gr_log_learn(new_dentry, mnt, new_mode);
46305 + preempt_enable();
46306 + return new_mode;
46307 + }
46308 +
46309 + preempt_enable();
46310 + return retval;
46311 +}
46312 +
46313 +int
46314 +gr_check_hidden_task(const struct task_struct *task)
46315 +{
46316 + if (unlikely(!(gr_status & GR_READY)))
46317 + return 0;
46318 +
46319 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
46320 + return 1;
46321 +
46322 + return 0;
46323 +}
46324 +
46325 +int
46326 +gr_check_protected_task(const struct task_struct *task)
46327 +{
46328 + if (unlikely(!(gr_status & GR_READY) || !task))
46329 + return 0;
46330 +
46331 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46332 + task->acl != current->acl)
46333 + return 1;
46334 +
46335 + return 0;
46336 +}
46337 +
46338 +int
46339 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
46340 +{
46341 + struct task_struct *p;
46342 + int ret = 0;
46343 +
46344 + if (unlikely(!(gr_status & GR_READY) || !pid))
46345 + return ret;
46346 +
46347 + read_lock(&tasklist_lock);
46348 + do_each_pid_task(pid, type, p) {
46349 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46350 + p->acl != current->acl) {
46351 + ret = 1;
46352 + goto out;
46353 + }
46354 + } while_each_pid_task(pid, type, p);
46355 +out:
46356 + read_unlock(&tasklist_lock);
46357 +
46358 + return ret;
46359 +}
46360 +
46361 +void
46362 +gr_copy_label(struct task_struct *tsk)
46363 +{
46364 + tsk->signal->used_accept = 0;
46365 + tsk->acl_sp_role = 0;
46366 + tsk->acl_role_id = current->acl_role_id;
46367 + tsk->acl = current->acl;
46368 + tsk->role = current->role;
46369 + tsk->signal->curr_ip = current->signal->curr_ip;
46370 + tsk->signal->saved_ip = current->signal->saved_ip;
46371 + if (current->exec_file)
46372 + get_file(current->exec_file);
46373 + tsk->exec_file = current->exec_file;
46374 + tsk->is_writable = current->is_writable;
46375 + if (unlikely(current->signal->used_accept)) {
46376 + current->signal->curr_ip = 0;
46377 + current->signal->saved_ip = 0;
46378 + }
46379 +
46380 + return;
46381 +}
46382 +
46383 +static void
46384 +gr_set_proc_res(struct task_struct *task)
46385 +{
46386 + struct acl_subject_label *proc;
46387 + unsigned short i;
46388 +
46389 + proc = task->acl;
46390 +
46391 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
46392 + return;
46393 +
46394 + for (i = 0; i < RLIM_NLIMITS; i++) {
46395 + if (!(proc->resmask & (1 << i)))
46396 + continue;
46397 +
46398 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
46399 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
46400 + }
46401 +
46402 + return;
46403 +}
46404 +
46405 +extern int __gr_process_user_ban(struct user_struct *user);
46406 +
46407 +int
46408 +gr_check_user_change(int real, int effective, int fs)
46409 +{
46410 + unsigned int i;
46411 + __u16 num;
46412 + uid_t *uidlist;
46413 + int curuid;
46414 + int realok = 0;
46415 + int effectiveok = 0;
46416 + int fsok = 0;
46417 +
46418 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
46419 + struct user_struct *user;
46420 +
46421 + if (real == -1)
46422 + goto skipit;
46423 +
46424 + user = find_user(real);
46425 + if (user == NULL)
46426 + goto skipit;
46427 +
46428 + if (__gr_process_user_ban(user)) {
46429 + /* for find_user */
46430 + free_uid(user);
46431 + return 1;
46432 + }
46433 +
46434 + /* for find_user */
46435 + free_uid(user);
46436 +
46437 +skipit:
46438 +#endif
46439 +
46440 + if (unlikely(!(gr_status & GR_READY)))
46441 + return 0;
46442 +
46443 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46444 + gr_log_learn_id_change('u', real, effective, fs);
46445 +
46446 + num = current->acl->user_trans_num;
46447 + uidlist = current->acl->user_transitions;
46448 +
46449 + if (uidlist == NULL)
46450 + return 0;
46451 +
46452 + if (real == -1)
46453 + realok = 1;
46454 + if (effective == -1)
46455 + effectiveok = 1;
46456 + if (fs == -1)
46457 + fsok = 1;
46458 +
46459 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
46460 + for (i = 0; i < num; i++) {
46461 + curuid = (int)uidlist[i];
46462 + if (real == curuid)
46463 + realok = 1;
46464 + if (effective == curuid)
46465 + effectiveok = 1;
46466 + if (fs == curuid)
46467 + fsok = 1;
46468 + }
46469 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
46470 + for (i = 0; i < num; i++) {
46471 + curuid = (int)uidlist[i];
46472 + if (real == curuid)
46473 + break;
46474 + if (effective == curuid)
46475 + break;
46476 + if (fs == curuid)
46477 + break;
46478 + }
46479 + /* not in deny list */
46480 + if (i == num) {
46481 + realok = 1;
46482 + effectiveok = 1;
46483 + fsok = 1;
46484 + }
46485 + }
46486 +
46487 + if (realok && effectiveok && fsok)
46488 + return 0;
46489 + else {
46490 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46491 + return 1;
46492 + }
46493 +}
46494 +
46495 +int
46496 +gr_check_group_change(int real, int effective, int fs)
46497 +{
46498 + unsigned int i;
46499 + __u16 num;
46500 + gid_t *gidlist;
46501 + int curgid;
46502 + int realok = 0;
46503 + int effectiveok = 0;
46504 + int fsok = 0;
46505 +
46506 + if (unlikely(!(gr_status & GR_READY)))
46507 + return 0;
46508 +
46509 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46510 + gr_log_learn_id_change('g', real, effective, fs);
46511 +
46512 + num = current->acl->group_trans_num;
46513 + gidlist = current->acl->group_transitions;
46514 +
46515 + if (gidlist == NULL)
46516 + return 0;
46517 +
46518 + if (real == -1)
46519 + realok = 1;
46520 + if (effective == -1)
46521 + effectiveok = 1;
46522 + if (fs == -1)
46523 + fsok = 1;
46524 +
46525 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
46526 + for (i = 0; i < num; i++) {
46527 + curgid = (int)gidlist[i];
46528 + if (real == curgid)
46529 + realok = 1;
46530 + if (effective == curgid)
46531 + effectiveok = 1;
46532 + if (fs == curgid)
46533 + fsok = 1;
46534 + }
46535 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
46536 + for (i = 0; i < num; i++) {
46537 + curgid = (int)gidlist[i];
46538 + if (real == curgid)
46539 + break;
46540 + if (effective == curgid)
46541 + break;
46542 + if (fs == curgid)
46543 + break;
46544 + }
46545 + /* not in deny list */
46546 + if (i == num) {
46547 + realok = 1;
46548 + effectiveok = 1;
46549 + fsok = 1;
46550 + }
46551 + }
46552 +
46553 + if (realok && effectiveok && fsok)
46554 + return 0;
46555 + else {
46556 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46557 + return 1;
46558 + }
46559 +}
46560 +
46561 +void
46562 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46563 +{
46564 + struct acl_role_label *role = task->role;
46565 + struct acl_subject_label *subj = NULL;
46566 + struct acl_object_label *obj;
46567 + struct file *filp;
46568 +
46569 + if (unlikely(!(gr_status & GR_READY)))
46570 + return;
46571 +
46572 + filp = task->exec_file;
46573 +
46574 + /* kernel process, we'll give them the kernel role */
46575 + if (unlikely(!filp)) {
46576 + task->role = kernel_role;
46577 + task->acl = kernel_role->root_label;
46578 + return;
46579 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46580 + role = lookup_acl_role_label(task, uid, gid);
46581 +
46582 + /* perform subject lookup in possibly new role
46583 + we can use this result below in the case where role == task->role
46584 + */
46585 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
46586 +
46587 + /* if we changed uid/gid, but result in the same role
46588 + and are using inheritance, don't lose the inherited subject
46589 + if current subject is other than what normal lookup
46590 + would result in, we arrived via inheritance, don't
46591 + lose subject
46592 + */
46593 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46594 + (subj == task->acl)))
46595 + task->acl = subj;
46596 +
46597 + task->role = role;
46598 +
46599 + task->is_writable = 0;
46600 +
46601 + /* ignore additional mmap checks for processes that are writable
46602 + by the default ACL */
46603 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46604 + if (unlikely(obj->mode & GR_WRITE))
46605 + task->is_writable = 1;
46606 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46607 + if (unlikely(obj->mode & GR_WRITE))
46608 + task->is_writable = 1;
46609 +
46610 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46611 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46612 +#endif
46613 +
46614 + gr_set_proc_res(task);
46615 +
46616 + return;
46617 +}
46618 +
46619 +int
46620 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
46621 + const int unsafe_share)
46622 +{
46623 + struct task_struct *task = current;
46624 + struct acl_subject_label *newacl;
46625 + struct acl_object_label *obj;
46626 + __u32 retmode;
46627 +
46628 + if (unlikely(!(gr_status & GR_READY)))
46629 + return 0;
46630 +
46631 + newacl = chk_subj_label(dentry, mnt, task->role);
46632 +
46633 + task_lock(task);
46634 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
46635 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
46636 + !(task->role->roletype & GR_ROLE_GOD) &&
46637 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
46638 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
46639 + task_unlock(task);
46640 + if (unsafe_share)
46641 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
46642 + else
46643 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
46644 + return -EACCES;
46645 + }
46646 + task_unlock(task);
46647 +
46648 + obj = chk_obj_label(dentry, mnt, task->acl);
46649 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
46650 +
46651 + if (!(task->acl->mode & GR_INHERITLEARN) &&
46652 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
46653 + if (obj->nested)
46654 + task->acl = obj->nested;
46655 + else
46656 + task->acl = newacl;
46657 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
46658 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
46659 +
46660 + task->is_writable = 0;
46661 +
46662 + /* ignore additional mmap checks for processes that are writable
46663 + by the default ACL */
46664 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
46665 + if (unlikely(obj->mode & GR_WRITE))
46666 + task->is_writable = 1;
46667 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
46668 + if (unlikely(obj->mode & GR_WRITE))
46669 + task->is_writable = 1;
46670 +
46671 + gr_set_proc_res(task);
46672 +
46673 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46674 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46675 +#endif
46676 + return 0;
46677 +}
46678 +
46679 +/* always called with valid inodev ptr */
46680 +static void
46681 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
46682 +{
46683 + struct acl_object_label *matchpo;
46684 + struct acl_subject_label *matchps;
46685 + struct acl_subject_label *subj;
46686 + struct acl_role_label *role;
46687 + unsigned int x;
46688 +
46689 + FOR_EACH_ROLE_START(role)
46690 + FOR_EACH_SUBJECT_START(role, subj, x)
46691 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
46692 + matchpo->mode |= GR_DELETED;
46693 + FOR_EACH_SUBJECT_END(subj,x)
46694 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46695 + if (subj->inode == ino && subj->device == dev)
46696 + subj->mode |= GR_DELETED;
46697 + FOR_EACH_NESTED_SUBJECT_END(subj)
46698 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
46699 + matchps->mode |= GR_DELETED;
46700 + FOR_EACH_ROLE_END(role)
46701 +
46702 + inodev->nentry->deleted = 1;
46703 +
46704 + return;
46705 +}
46706 +
46707 +void
46708 +gr_handle_delete(const ino_t ino, const dev_t dev)
46709 +{
46710 + struct inodev_entry *inodev;
46711 +
46712 + if (unlikely(!(gr_status & GR_READY)))
46713 + return;
46714 +
46715 + write_lock(&gr_inode_lock);
46716 + inodev = lookup_inodev_entry(ino, dev);
46717 + if (inodev != NULL)
46718 + do_handle_delete(inodev, ino, dev);
46719 + write_unlock(&gr_inode_lock);
46720 +
46721 + return;
46722 +}
46723 +
46724 +static void
46725 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
46726 + const ino_t newinode, const dev_t newdevice,
46727 + struct acl_subject_label *subj)
46728 +{
46729 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
46730 + struct acl_object_label *match;
46731 +
46732 + match = subj->obj_hash[index];
46733 +
46734 + while (match && (match->inode != oldinode ||
46735 + match->device != olddevice ||
46736 + !(match->mode & GR_DELETED)))
46737 + match = match->next;
46738 +
46739 + if (match && (match->inode == oldinode)
46740 + && (match->device == olddevice)
46741 + && (match->mode & GR_DELETED)) {
46742 + if (match->prev == NULL) {
46743 + subj->obj_hash[index] = match->next;
46744 + if (match->next != NULL)
46745 + match->next->prev = NULL;
46746 + } else {
46747 + match->prev->next = match->next;
46748 + if (match->next != NULL)
46749 + match->next->prev = match->prev;
46750 + }
46751 + match->prev = NULL;
46752 + match->next = NULL;
46753 + match->inode = newinode;
46754 + match->device = newdevice;
46755 + match->mode &= ~GR_DELETED;
46756 +
46757 + insert_acl_obj_label(match, subj);
46758 + }
46759 +
46760 + return;
46761 +}
46762 +
46763 +static void
46764 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
46765 + const ino_t newinode, const dev_t newdevice,
46766 + struct acl_role_label *role)
46767 +{
46768 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
46769 + struct acl_subject_label *match;
46770 +
46771 + match = role->subj_hash[index];
46772 +
46773 + while (match && (match->inode != oldinode ||
46774 + match->device != olddevice ||
46775 + !(match->mode & GR_DELETED)))
46776 + match = match->next;
46777 +
46778 + if (match && (match->inode == oldinode)
46779 + && (match->device == olddevice)
46780 + && (match->mode & GR_DELETED)) {
46781 + if (match->prev == NULL) {
46782 + role->subj_hash[index] = match->next;
46783 + if (match->next != NULL)
46784 + match->next->prev = NULL;
46785 + } else {
46786 + match->prev->next = match->next;
46787 + if (match->next != NULL)
46788 + match->next->prev = match->prev;
46789 + }
46790 + match->prev = NULL;
46791 + match->next = NULL;
46792 + match->inode = newinode;
46793 + match->device = newdevice;
46794 + match->mode &= ~GR_DELETED;
46795 +
46796 + insert_acl_subj_label(match, role);
46797 + }
46798 +
46799 + return;
46800 +}
46801 +
46802 +static void
46803 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
46804 + const ino_t newinode, const dev_t newdevice)
46805 +{
46806 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
46807 + struct inodev_entry *match;
46808 +
46809 + match = inodev_set.i_hash[index];
46810 +
46811 + while (match && (match->nentry->inode != oldinode ||
46812 + match->nentry->device != olddevice || !match->nentry->deleted))
46813 + match = match->next;
46814 +
46815 + if (match && (match->nentry->inode == oldinode)
46816 + && (match->nentry->device == olddevice) &&
46817 + match->nentry->deleted) {
46818 + if (match->prev == NULL) {
46819 + inodev_set.i_hash[index] = match->next;
46820 + if (match->next != NULL)
46821 + match->next->prev = NULL;
46822 + } else {
46823 + match->prev->next = match->next;
46824 + if (match->next != NULL)
46825 + match->next->prev = match->prev;
46826 + }
46827 + match->prev = NULL;
46828 + match->next = NULL;
46829 + match->nentry->inode = newinode;
46830 + match->nentry->device = newdevice;
46831 + match->nentry->deleted = 0;
46832 +
46833 + insert_inodev_entry(match);
46834 + }
46835 +
46836 + return;
46837 +}
46838 +
46839 +static void
46840 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
46841 + const struct vfsmount *mnt)
46842 +{
46843 + struct acl_subject_label *subj;
46844 + struct acl_role_label *role;
46845 + unsigned int x;
46846 + ino_t ino = dentry->d_inode->i_ino;
46847 + dev_t dev = __get_dev(dentry);
46848 +
46849 + FOR_EACH_ROLE_START(role)
46850 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
46851 +
46852 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46853 + if ((subj->inode == ino) && (subj->device == dev)) {
46854 + subj->inode = ino;
46855 + subj->device = dev;
46856 + }
46857 + FOR_EACH_NESTED_SUBJECT_END(subj)
46858 + FOR_EACH_SUBJECT_START(role, subj, x)
46859 + update_acl_obj_label(matchn->inode, matchn->device,
46860 + ino, dev, subj);
46861 + FOR_EACH_SUBJECT_END(subj,x)
46862 + FOR_EACH_ROLE_END(role)
46863 +
46864 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
46865 +
46866 + return;
46867 +}
46868 +
46869 +void
46870 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
46871 +{
46872 + struct name_entry *matchn;
46873 +
46874 + if (unlikely(!(gr_status & GR_READY)))
46875 + return;
46876 +
46877 + preempt_disable();
46878 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
46879 +
46880 + if (unlikely((unsigned long)matchn)) {
46881 + write_lock(&gr_inode_lock);
46882 + do_handle_create(matchn, dentry, mnt);
46883 + write_unlock(&gr_inode_lock);
46884 + }
46885 + preempt_enable();
46886 +
46887 + return;
46888 +}
46889 +
46890 +void
46891 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
46892 + struct dentry *old_dentry,
46893 + struct dentry *new_dentry,
46894 + struct vfsmount *mnt, const __u8 replace)
46895 +{
46896 + struct name_entry *matchn;
46897 + struct inodev_entry *inodev;
46898 + ino_t old_ino = old_dentry->d_inode->i_ino;
46899 + dev_t old_dev = __get_dev(old_dentry);
46900 +
46901 + /* vfs_rename swaps the name and parent link for old_dentry and
46902 + new_dentry
46903 + at this point, old_dentry has the new name, parent link, and inode
46904 + for the renamed file
46905 + if a file is being replaced by a rename, new_dentry has the inode
46906 + and name for the replaced file
46907 + */
46908 +
46909 + if (unlikely(!(gr_status & GR_READY)))
46910 + return;
46911 +
46912 + preempt_disable();
46913 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
46914 +
46915 + /* we wouldn't have to check d_inode if it weren't for
46916 + NFS silly-renaming
46917 + */
46918 +
46919 + write_lock(&gr_inode_lock);
46920 + if (unlikely(replace && new_dentry->d_inode)) {
46921 + ino_t new_ino = new_dentry->d_inode->i_ino;
46922 + dev_t new_dev = __get_dev(new_dentry);
46923 +
46924 + inodev = lookup_inodev_entry(new_ino, new_dev);
46925 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
46926 + do_handle_delete(inodev, new_ino, new_dev);
46927 + }
46928 +
46929 + inodev = lookup_inodev_entry(old_ino, old_dev);
46930 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
46931 + do_handle_delete(inodev, old_ino, old_dev);
46932 +
46933 + if (unlikely((unsigned long)matchn))
46934 + do_handle_create(matchn, old_dentry, mnt);
46935 +
46936 + write_unlock(&gr_inode_lock);
46937 + preempt_enable();
46938 +
46939 + return;
46940 +}
46941 +
46942 +static int
46943 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
46944 + unsigned char **sum)
46945 +{
46946 + struct acl_role_label *r;
46947 + struct role_allowed_ip *ipp;
46948 + struct role_transition *trans;
46949 + unsigned int i;
46950 + int found = 0;
46951 + u32 curr_ip = current->signal->curr_ip;
46952 +
46953 + current->signal->saved_ip = curr_ip;
46954 +
46955 + /* check transition table */
46956 +
46957 + for (trans = current->role->transitions; trans; trans = trans->next) {
46958 + if (!strcmp(rolename, trans->rolename)) {
46959 + found = 1;
46960 + break;
46961 + }
46962 + }
46963 +
46964 + if (!found)
46965 + return 0;
46966 +
46967 + /* handle special roles that do not require authentication
46968 + and check ip */
46969 +
46970 + FOR_EACH_ROLE_START(r)
46971 + if (!strcmp(rolename, r->rolename) &&
46972 + (r->roletype & GR_ROLE_SPECIAL)) {
46973 + found = 0;
46974 + if (r->allowed_ips != NULL) {
46975 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
46976 + if ((ntohl(curr_ip) & ipp->netmask) ==
46977 + (ntohl(ipp->addr) & ipp->netmask))
46978 + found = 1;
46979 + }
46980 + } else
46981 + found = 2;
46982 + if (!found)
46983 + return 0;
46984 +
46985 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
46986 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
46987 + *salt = NULL;
46988 + *sum = NULL;
46989 + return 1;
46990 + }
46991 + }
46992 + FOR_EACH_ROLE_END(r)
46993 +
46994 + for (i = 0; i < num_sprole_pws; i++) {
46995 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
46996 + *salt = acl_special_roles[i]->salt;
46997 + *sum = acl_special_roles[i]->sum;
46998 + return 1;
46999 + }
47000 + }
47001 +
47002 + return 0;
47003 +}
47004 +
47005 +static void
47006 +assign_special_role(char *rolename)
47007 +{
47008 + struct acl_object_label *obj;
47009 + struct acl_role_label *r;
47010 + struct acl_role_label *assigned = NULL;
47011 + struct task_struct *tsk;
47012 + struct file *filp;
47013 +
47014 + FOR_EACH_ROLE_START(r)
47015 + if (!strcmp(rolename, r->rolename) &&
47016 + (r->roletype & GR_ROLE_SPECIAL)) {
47017 + assigned = r;
47018 + break;
47019 + }
47020 + FOR_EACH_ROLE_END(r)
47021 +
47022 + if (!assigned)
47023 + return;
47024 +
47025 + read_lock(&tasklist_lock);
47026 + read_lock(&grsec_exec_file_lock);
47027 +
47028 + tsk = current->real_parent;
47029 + if (tsk == NULL)
47030 + goto out_unlock;
47031 +
47032 + filp = tsk->exec_file;
47033 + if (filp == NULL)
47034 + goto out_unlock;
47035 +
47036 + tsk->is_writable = 0;
47037 +
47038 + tsk->acl_sp_role = 1;
47039 + tsk->acl_role_id = ++acl_sp_role_value;
47040 + tsk->role = assigned;
47041 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
47042 +
47043 + /* ignore additional mmap checks for processes that are writable
47044 + by the default ACL */
47045 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47046 + if (unlikely(obj->mode & GR_WRITE))
47047 + tsk->is_writable = 1;
47048 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
47049 + if (unlikely(obj->mode & GR_WRITE))
47050 + tsk->is_writable = 1;
47051 +
47052 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47053 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
47054 +#endif
47055 +
47056 +out_unlock:
47057 + read_unlock(&grsec_exec_file_lock);
47058 + read_unlock(&tasklist_lock);
47059 + return;
47060 +}
47061 +
47062 +int gr_check_secure_terminal(struct task_struct *task)
47063 +{
47064 + struct task_struct *p, *p2, *p3;
47065 + struct files_struct *files;
47066 + struct fdtable *fdt;
47067 + struct file *our_file = NULL, *file;
47068 + int i;
47069 +
47070 + if (task->signal->tty == NULL)
47071 + return 1;
47072 +
47073 + files = get_files_struct(task);
47074 + if (files != NULL) {
47075 + rcu_read_lock();
47076 + fdt = files_fdtable(files);
47077 + for (i=0; i < fdt->max_fds; i++) {
47078 + file = fcheck_files(files, i);
47079 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
47080 + get_file(file);
47081 + our_file = file;
47082 + }
47083 + }
47084 + rcu_read_unlock();
47085 + put_files_struct(files);
47086 + }
47087 +
47088 + if (our_file == NULL)
47089 + return 1;
47090 +
47091 + read_lock(&tasklist_lock);
47092 + do_each_thread(p2, p) {
47093 + files = get_files_struct(p);
47094 + if (files == NULL ||
47095 + (p->signal && p->signal->tty == task->signal->tty)) {
47096 + if (files != NULL)
47097 + put_files_struct(files);
47098 + continue;
47099 + }
47100 + rcu_read_lock();
47101 + fdt = files_fdtable(files);
47102 + for (i=0; i < fdt->max_fds; i++) {
47103 + file = fcheck_files(files, i);
47104 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
47105 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
47106 + p3 = task;
47107 + while (p3->pid > 0) {
47108 + if (p3 == p)
47109 + break;
47110 + p3 = p3->real_parent;
47111 + }
47112 + if (p3 == p)
47113 + break;
47114 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
47115 + gr_handle_alertkill(p);
47116 + rcu_read_unlock();
47117 + put_files_struct(files);
47118 + read_unlock(&tasklist_lock);
47119 + fput(our_file);
47120 + return 0;
47121 + }
47122 + }
47123 + rcu_read_unlock();
47124 + put_files_struct(files);
47125 + } while_each_thread(p2, p);
47126 + read_unlock(&tasklist_lock);
47127 +
47128 + fput(our_file);
47129 + return 1;
47130 +}
47131 +
47132 +ssize_t
47133 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
47134 +{
47135 + struct gr_arg_wrapper uwrap;
47136 + unsigned char *sprole_salt = NULL;
47137 + unsigned char *sprole_sum = NULL;
47138 + int error = sizeof (struct gr_arg_wrapper);
47139 + int error2 = 0;
47140 +
47141 + mutex_lock(&gr_dev_mutex);
47142 +
47143 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
47144 + error = -EPERM;
47145 + goto out;
47146 + }
47147 +
47148 + if (count != sizeof (struct gr_arg_wrapper)) {
47149 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
47150 + error = -EINVAL;
47151 + goto out;
47152 + }
47153 +
47154 +
47155 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
47156 + gr_auth_expires = 0;
47157 + gr_auth_attempts = 0;
47158 + }
47159 +
47160 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
47161 + error = -EFAULT;
47162 + goto out;
47163 + }
47164 +
47165 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
47166 + error = -EINVAL;
47167 + goto out;
47168 + }
47169 +
47170 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
47171 + error = -EFAULT;
47172 + goto out;
47173 + }
47174 +
47175 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47176 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47177 + time_after(gr_auth_expires, get_seconds())) {
47178 + error = -EBUSY;
47179 + goto out;
47180 + }
47181 +
47182 + /* if non-root trying to do anything other than use a special role,
47183 + do not attempt authentication, do not count towards authentication
47184 + locking
47185 + */
47186 +
47187 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
47188 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47189 + current_uid()) {
47190 + error = -EPERM;
47191 + goto out;
47192 + }
47193 +
47194 + /* ensure pw and special role name are null terminated */
47195 +
47196 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
47197 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
47198 +
47199 + /* Okay.
47200 + * We have our enough of the argument structure..(we have yet
47201 + * to copy_from_user the tables themselves) . Copy the tables
47202 + * only if we need them, i.e. for loading operations. */
47203 +
47204 + switch (gr_usermode->mode) {
47205 + case GR_STATUS:
47206 + if (gr_status & GR_READY) {
47207 + error = 1;
47208 + if (!gr_check_secure_terminal(current))
47209 + error = 3;
47210 + } else
47211 + error = 2;
47212 + goto out;
47213 + case GR_SHUTDOWN:
47214 + if ((gr_status & GR_READY)
47215 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47216 + pax_open_kernel();
47217 + gr_status &= ~GR_READY;
47218 + pax_close_kernel();
47219 +
47220 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
47221 + free_variables();
47222 + memset(gr_usermode, 0, sizeof (struct gr_arg));
47223 + memset(gr_system_salt, 0, GR_SALT_LEN);
47224 + memset(gr_system_sum, 0, GR_SHA_LEN);
47225 + } else if (gr_status & GR_READY) {
47226 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
47227 + error = -EPERM;
47228 + } else {
47229 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
47230 + error = -EAGAIN;
47231 + }
47232 + break;
47233 + case GR_ENABLE:
47234 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
47235 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
47236 + else {
47237 + if (gr_status & GR_READY)
47238 + error = -EAGAIN;
47239 + else
47240 + error = error2;
47241 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
47242 + }
47243 + break;
47244 + case GR_RELOAD:
47245 + if (!(gr_status & GR_READY)) {
47246 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
47247 + error = -EAGAIN;
47248 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47249 + preempt_disable();
47250 +
47251 + pax_open_kernel();
47252 + gr_status &= ~GR_READY;
47253 + pax_close_kernel();
47254 +
47255 + free_variables();
47256 + if (!(error2 = gracl_init(gr_usermode))) {
47257 + preempt_enable();
47258 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
47259 + } else {
47260 + preempt_enable();
47261 + error = error2;
47262 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47263 + }
47264 + } else {
47265 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47266 + error = -EPERM;
47267 + }
47268 + break;
47269 + case GR_SEGVMOD:
47270 + if (unlikely(!(gr_status & GR_READY))) {
47271 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
47272 + error = -EAGAIN;
47273 + break;
47274 + }
47275 +
47276 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47277 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
47278 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
47279 + struct acl_subject_label *segvacl;
47280 + segvacl =
47281 + lookup_acl_subj_label(gr_usermode->segv_inode,
47282 + gr_usermode->segv_device,
47283 + current->role);
47284 + if (segvacl) {
47285 + segvacl->crashes = 0;
47286 + segvacl->expires = 0;
47287 + }
47288 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
47289 + gr_remove_uid(gr_usermode->segv_uid);
47290 + }
47291 + } else {
47292 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
47293 + error = -EPERM;
47294 + }
47295 + break;
47296 + case GR_SPROLE:
47297 + case GR_SPROLEPAM:
47298 + if (unlikely(!(gr_status & GR_READY))) {
47299 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
47300 + error = -EAGAIN;
47301 + break;
47302 + }
47303 +
47304 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
47305 + current->role->expires = 0;
47306 + current->role->auth_attempts = 0;
47307 + }
47308 +
47309 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47310 + time_after(current->role->expires, get_seconds())) {
47311 + error = -EBUSY;
47312 + goto out;
47313 + }
47314 +
47315 + if (lookup_special_role_auth
47316 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
47317 + && ((!sprole_salt && !sprole_sum)
47318 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
47319 + char *p = "";
47320 + assign_special_role(gr_usermode->sp_role);
47321 + read_lock(&tasklist_lock);
47322 + if (current->real_parent)
47323 + p = current->real_parent->role->rolename;
47324 + read_unlock(&tasklist_lock);
47325 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
47326 + p, acl_sp_role_value);
47327 + } else {
47328 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
47329 + error = -EPERM;
47330 + if(!(current->role->auth_attempts++))
47331 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47332 +
47333 + goto out;
47334 + }
47335 + break;
47336 + case GR_UNSPROLE:
47337 + if (unlikely(!(gr_status & GR_READY))) {
47338 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
47339 + error = -EAGAIN;
47340 + break;
47341 + }
47342 +
47343 + if (current->role->roletype & GR_ROLE_SPECIAL) {
47344 + char *p = "";
47345 + int i = 0;
47346 +
47347 + read_lock(&tasklist_lock);
47348 + if (current->real_parent) {
47349 + p = current->real_parent->role->rolename;
47350 + i = current->real_parent->acl_role_id;
47351 + }
47352 + read_unlock(&tasklist_lock);
47353 +
47354 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
47355 + gr_set_acls(1);
47356 + } else {
47357 + error = -EPERM;
47358 + goto out;
47359 + }
47360 + break;
47361 + default:
47362 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
47363 + error = -EINVAL;
47364 + break;
47365 + }
47366 +
47367 + if (error != -EPERM)
47368 + goto out;
47369 +
47370 + if(!(gr_auth_attempts++))
47371 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47372 +
47373 + out:
47374 + mutex_unlock(&gr_dev_mutex);
47375 + return error;
47376 +}
47377 +
47378 +/* must be called with
47379 + rcu_read_lock();
47380 + read_lock(&tasklist_lock);
47381 + read_lock(&grsec_exec_file_lock);
47382 +*/
47383 +int gr_apply_subject_to_task(struct task_struct *task)
47384 +{
47385 + struct acl_object_label *obj;
47386 + char *tmpname;
47387 + struct acl_subject_label *tmpsubj;
47388 + struct file *filp;
47389 + struct name_entry *nmatch;
47390 +
47391 + filp = task->exec_file;
47392 + if (filp == NULL)
47393 + return 0;
47394 +
47395 + /* the following is to apply the correct subject
47396 + on binaries running when the RBAC system
47397 + is enabled, when the binaries have been
47398 + replaced or deleted since their execution
47399 + -----
47400 + when the RBAC system starts, the inode/dev
47401 + from exec_file will be one the RBAC system
47402 + is unaware of. It only knows the inode/dev
47403 + of the present file on disk, or the absence
47404 + of it.
47405 + */
47406 + preempt_disable();
47407 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
47408 +
47409 + nmatch = lookup_name_entry(tmpname);
47410 + preempt_enable();
47411 + tmpsubj = NULL;
47412 + if (nmatch) {
47413 + if (nmatch->deleted)
47414 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
47415 + else
47416 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
47417 + if (tmpsubj != NULL)
47418 + task->acl = tmpsubj;
47419 + }
47420 + if (tmpsubj == NULL)
47421 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
47422 + task->role);
47423 + if (task->acl) {
47424 + task->is_writable = 0;
47425 + /* ignore additional mmap checks for processes that are writable
47426 + by the default ACL */
47427 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47428 + if (unlikely(obj->mode & GR_WRITE))
47429 + task->is_writable = 1;
47430 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47431 + if (unlikely(obj->mode & GR_WRITE))
47432 + task->is_writable = 1;
47433 +
47434 + gr_set_proc_res(task);
47435 +
47436 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47437 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47438 +#endif
47439 + } else {
47440 + return 1;
47441 + }
47442 +
47443 + return 0;
47444 +}
47445 +
47446 +int
47447 +gr_set_acls(const int type)
47448 +{
47449 + struct task_struct *task, *task2;
47450 + struct acl_role_label *role = current->role;
47451 + __u16 acl_role_id = current->acl_role_id;
47452 + const struct cred *cred;
47453 + int ret;
47454 +
47455 + rcu_read_lock();
47456 + read_lock(&tasklist_lock);
47457 + read_lock(&grsec_exec_file_lock);
47458 + do_each_thread(task2, task) {
47459 + /* check to see if we're called from the exit handler,
47460 + if so, only replace ACLs that have inherited the admin
47461 + ACL */
47462 +
47463 + if (type && (task->role != role ||
47464 + task->acl_role_id != acl_role_id))
47465 + continue;
47466 +
47467 + task->acl_role_id = 0;
47468 + task->acl_sp_role = 0;
47469 +
47470 + if (task->exec_file) {
47471 + cred = __task_cred(task);
47472 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
47473 + ret = gr_apply_subject_to_task(task);
47474 + if (ret) {
47475 + read_unlock(&grsec_exec_file_lock);
47476 + read_unlock(&tasklist_lock);
47477 + rcu_read_unlock();
47478 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
47479 + return ret;
47480 + }
47481 + } else {
47482 + // it's a kernel process
47483 + task->role = kernel_role;
47484 + task->acl = kernel_role->root_label;
47485 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
47486 + task->acl->mode &= ~GR_PROCFIND;
47487 +#endif
47488 + }
47489 + } while_each_thread(task2, task);
47490 + read_unlock(&grsec_exec_file_lock);
47491 + read_unlock(&tasklist_lock);
47492 + rcu_read_unlock();
47493 +
47494 + return 0;
47495 +}
47496 +
47497 +void
47498 +gr_learn_resource(const struct task_struct *task,
47499 + const int res, const unsigned long wanted, const int gt)
47500 +{
47501 + struct acl_subject_label *acl;
47502 + const struct cred *cred;
47503 +
47504 + if (unlikely((gr_status & GR_READY) &&
47505 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
47506 + goto skip_reslog;
47507 +
47508 +#ifdef CONFIG_GRKERNSEC_RESLOG
47509 + gr_log_resource(task, res, wanted, gt);
47510 +#endif
47511 + skip_reslog:
47512 +
47513 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
47514 + return;
47515 +
47516 + acl = task->acl;
47517 +
47518 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
47519 + !(acl->resmask & (1 << (unsigned short) res))))
47520 + return;
47521 +
47522 + if (wanted >= acl->res[res].rlim_cur) {
47523 + unsigned long res_add;
47524 +
47525 + res_add = wanted;
47526 + switch (res) {
47527 + case RLIMIT_CPU:
47528 + res_add += GR_RLIM_CPU_BUMP;
47529 + break;
47530 + case RLIMIT_FSIZE:
47531 + res_add += GR_RLIM_FSIZE_BUMP;
47532 + break;
47533 + case RLIMIT_DATA:
47534 + res_add += GR_RLIM_DATA_BUMP;
47535 + break;
47536 + case RLIMIT_STACK:
47537 + res_add += GR_RLIM_STACK_BUMP;
47538 + break;
47539 + case RLIMIT_CORE:
47540 + res_add += GR_RLIM_CORE_BUMP;
47541 + break;
47542 + case RLIMIT_RSS:
47543 + res_add += GR_RLIM_RSS_BUMP;
47544 + break;
47545 + case RLIMIT_NPROC:
47546 + res_add += GR_RLIM_NPROC_BUMP;
47547 + break;
47548 + case RLIMIT_NOFILE:
47549 + res_add += GR_RLIM_NOFILE_BUMP;
47550 + break;
47551 + case RLIMIT_MEMLOCK:
47552 + res_add += GR_RLIM_MEMLOCK_BUMP;
47553 + break;
47554 + case RLIMIT_AS:
47555 + res_add += GR_RLIM_AS_BUMP;
47556 + break;
47557 + case RLIMIT_LOCKS:
47558 + res_add += GR_RLIM_LOCKS_BUMP;
47559 + break;
47560 + case RLIMIT_SIGPENDING:
47561 + res_add += GR_RLIM_SIGPENDING_BUMP;
47562 + break;
47563 + case RLIMIT_MSGQUEUE:
47564 + res_add += GR_RLIM_MSGQUEUE_BUMP;
47565 + break;
47566 + case RLIMIT_NICE:
47567 + res_add += GR_RLIM_NICE_BUMP;
47568 + break;
47569 + case RLIMIT_RTPRIO:
47570 + res_add += GR_RLIM_RTPRIO_BUMP;
47571 + break;
47572 + case RLIMIT_RTTIME:
47573 + res_add += GR_RLIM_RTTIME_BUMP;
47574 + break;
47575 + }
47576 +
47577 + acl->res[res].rlim_cur = res_add;
47578 +
47579 + if (wanted > acl->res[res].rlim_max)
47580 + acl->res[res].rlim_max = res_add;
47581 +
47582 + /* only log the subject filename, since resource logging is supported for
47583 + single-subject learning only */
47584 + rcu_read_lock();
47585 + cred = __task_cred(task);
47586 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47587 + task->role->roletype, cred->uid, cred->gid, acl->filename,
47588 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47589 + "", (unsigned long) res, &task->signal->saved_ip);
47590 + rcu_read_unlock();
47591 + }
47592 +
47593 + return;
47594 +}
47595 +
47596 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47597 +void
47598 +pax_set_initial_flags(struct linux_binprm *bprm)
47599 +{
47600 + struct task_struct *task = current;
47601 + struct acl_subject_label *proc;
47602 + unsigned long flags;
47603 +
47604 + if (unlikely(!(gr_status & GR_READY)))
47605 + return;
47606 +
47607 + flags = pax_get_flags(task);
47608 +
47609 + proc = task->acl;
47610 +
47611 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47612 + flags &= ~MF_PAX_PAGEEXEC;
47613 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47614 + flags &= ~MF_PAX_SEGMEXEC;
47615 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
47616 + flags &= ~MF_PAX_RANDMMAP;
47617 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
47618 + flags &= ~MF_PAX_EMUTRAMP;
47619 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
47620 + flags &= ~MF_PAX_MPROTECT;
47621 +
47622 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
47623 + flags |= MF_PAX_PAGEEXEC;
47624 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
47625 + flags |= MF_PAX_SEGMEXEC;
47626 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
47627 + flags |= MF_PAX_RANDMMAP;
47628 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
47629 + flags |= MF_PAX_EMUTRAMP;
47630 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
47631 + flags |= MF_PAX_MPROTECT;
47632 +
47633 + pax_set_flags(task, flags);
47634 +
47635 + return;
47636 +}
47637 +#endif
47638 +
47639 +#ifdef CONFIG_SYSCTL
47640 +/* Eric Biederman likes breaking userland ABI and every inode-based security
47641 + system to save 35kb of memory */
47642 +
47643 +/* we modify the passed in filename, but adjust it back before returning */
47644 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
47645 +{
47646 + struct name_entry *nmatch;
47647 + char *p, *lastp = NULL;
47648 + struct acl_object_label *obj = NULL, *tmp;
47649 + struct acl_subject_label *tmpsubj;
47650 + char c = '\0';
47651 +
47652 + read_lock(&gr_inode_lock);
47653 +
47654 + p = name + len - 1;
47655 + do {
47656 + nmatch = lookup_name_entry(name);
47657 + if (lastp != NULL)
47658 + *lastp = c;
47659 +
47660 + if (nmatch == NULL)
47661 + goto next_component;
47662 + tmpsubj = current->acl;
47663 + do {
47664 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
47665 + if (obj != NULL) {
47666 + tmp = obj->globbed;
47667 + while (tmp) {
47668 + if (!glob_match(tmp->filename, name)) {
47669 + obj = tmp;
47670 + goto found_obj;
47671 + }
47672 + tmp = tmp->next;
47673 + }
47674 + goto found_obj;
47675 + }
47676 + } while ((tmpsubj = tmpsubj->parent_subject));
47677 +next_component:
47678 + /* end case */
47679 + if (p == name)
47680 + break;
47681 +
47682 + while (*p != '/')
47683 + p--;
47684 + if (p == name)
47685 + lastp = p + 1;
47686 + else {
47687 + lastp = p;
47688 + p--;
47689 + }
47690 + c = *lastp;
47691 + *lastp = '\0';
47692 + } while (1);
47693 +found_obj:
47694 + read_unlock(&gr_inode_lock);
47695 + /* obj returned will always be non-null */
47696 + return obj;
47697 +}
47698 +
47699 +/* returns 0 when allowing, non-zero on error
47700 + op of 0 is used for readdir, so we don't log the names of hidden files
47701 +*/
47702 +__u32
47703 +gr_handle_sysctl(const struct ctl_table *table, const int op)
47704 +{
47705 + struct ctl_table *tmp;
47706 + const char *proc_sys = "/proc/sys";
47707 + char *path;
47708 + struct acl_object_label *obj;
47709 + unsigned short len = 0, pos = 0, depth = 0, i;
47710 + __u32 err = 0;
47711 + __u32 mode = 0;
47712 +
47713 + if (unlikely(!(gr_status & GR_READY)))
47714 + return 0;
47715 +
47716 + /* for now, ignore operations on non-sysctl entries if it's not a
47717 + readdir*/
47718 + if (table->child != NULL && op != 0)
47719 + return 0;
47720 +
47721 + mode |= GR_FIND;
47722 + /* it's only a read if it's an entry, read on dirs is for readdir */
47723 + if (op & MAY_READ)
47724 + mode |= GR_READ;
47725 + if (op & MAY_WRITE)
47726 + mode |= GR_WRITE;
47727 +
47728 + preempt_disable();
47729 +
47730 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47731 +
47732 + /* it's only a read/write if it's an actual entry, not a dir
47733 + (which are opened for readdir)
47734 + */
47735 +
47736 + /* convert the requested sysctl entry into a pathname */
47737 +
47738 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47739 + len += strlen(tmp->procname);
47740 + len++;
47741 + depth++;
47742 + }
47743 +
47744 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
47745 + /* deny */
47746 + goto out;
47747 + }
47748 +
47749 + memset(path, 0, PAGE_SIZE);
47750 +
47751 + memcpy(path, proc_sys, strlen(proc_sys));
47752 +
47753 + pos += strlen(proc_sys);
47754 +
47755 + for (; depth > 0; depth--) {
47756 + path[pos] = '/';
47757 + pos++;
47758 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47759 + if (depth == i) {
47760 + memcpy(path + pos, tmp->procname,
47761 + strlen(tmp->procname));
47762 + pos += strlen(tmp->procname);
47763 + }
47764 + i++;
47765 + }
47766 + }
47767 +
47768 + obj = gr_lookup_by_name(path, pos);
47769 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
47770 +
47771 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
47772 + ((err & mode) != mode))) {
47773 + __u32 new_mode = mode;
47774 +
47775 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47776 +
47777 + err = 0;
47778 + gr_log_learn_sysctl(path, new_mode);
47779 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
47780 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
47781 + err = -ENOENT;
47782 + } else if (!(err & GR_FIND)) {
47783 + err = -ENOENT;
47784 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
47785 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
47786 + path, (mode & GR_READ) ? " reading" : "",
47787 + (mode & GR_WRITE) ? " writing" : "");
47788 + err = -EACCES;
47789 + } else if ((err & mode) != mode) {
47790 + err = -EACCES;
47791 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
47792 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
47793 + path, (mode & GR_READ) ? " reading" : "",
47794 + (mode & GR_WRITE) ? " writing" : "");
47795 + err = 0;
47796 + } else
47797 + err = 0;
47798 +
47799 + out:
47800 + preempt_enable();
47801 +
47802 + return err;
47803 +}
47804 +#endif
47805 +
47806 +int
47807 +gr_handle_proc_ptrace(struct task_struct *task)
47808 +{
47809 + struct file *filp;
47810 + struct task_struct *tmp = task;
47811 + struct task_struct *curtemp = current;
47812 + __u32 retmode;
47813 +
47814 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47815 + if (unlikely(!(gr_status & GR_READY)))
47816 + return 0;
47817 +#endif
47818 +
47819 + read_lock(&tasklist_lock);
47820 + read_lock(&grsec_exec_file_lock);
47821 + filp = task->exec_file;
47822 +
47823 + while (tmp->pid > 0) {
47824 + if (tmp == curtemp)
47825 + break;
47826 + tmp = tmp->real_parent;
47827 + }
47828 +
47829 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47830 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
47831 + read_unlock(&grsec_exec_file_lock);
47832 + read_unlock(&tasklist_lock);
47833 + return 1;
47834 + }
47835 +
47836 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47837 + if (!(gr_status & GR_READY)) {
47838 + read_unlock(&grsec_exec_file_lock);
47839 + read_unlock(&tasklist_lock);
47840 + return 0;
47841 + }
47842 +#endif
47843 +
47844 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
47845 + read_unlock(&grsec_exec_file_lock);
47846 + read_unlock(&tasklist_lock);
47847 +
47848 + if (retmode & GR_NOPTRACE)
47849 + return 1;
47850 +
47851 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
47852 + && (current->acl != task->acl || (current->acl != current->role->root_label
47853 + && current->pid != task->pid)))
47854 + return 1;
47855 +
47856 + return 0;
47857 +}
47858 +
47859 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
47860 +{
47861 + if (unlikely(!(gr_status & GR_READY)))
47862 + return;
47863 +
47864 + if (!(current->role->roletype & GR_ROLE_GOD))
47865 + return;
47866 +
47867 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
47868 + p->role->rolename, gr_task_roletype_to_char(p),
47869 + p->acl->filename);
47870 +}
47871 +
47872 +int
47873 +gr_handle_ptrace(struct task_struct *task, const long request)
47874 +{
47875 + struct task_struct *tmp = task;
47876 + struct task_struct *curtemp = current;
47877 + __u32 retmode;
47878 +
47879 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47880 + if (unlikely(!(gr_status & GR_READY)))
47881 + return 0;
47882 +#endif
47883 +
47884 + read_lock(&tasklist_lock);
47885 + while (tmp->pid > 0) {
47886 + if (tmp == curtemp)
47887 + break;
47888 + tmp = tmp->real_parent;
47889 + }
47890 +
47891 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47892 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
47893 + read_unlock(&tasklist_lock);
47894 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47895 + return 1;
47896 + }
47897 + read_unlock(&tasklist_lock);
47898 +
47899 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47900 + if (!(gr_status & GR_READY))
47901 + return 0;
47902 +#endif
47903 +
47904 + read_lock(&grsec_exec_file_lock);
47905 + if (unlikely(!task->exec_file)) {
47906 + read_unlock(&grsec_exec_file_lock);
47907 + return 0;
47908 + }
47909 +
47910 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
47911 + read_unlock(&grsec_exec_file_lock);
47912 +
47913 + if (retmode & GR_NOPTRACE) {
47914 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47915 + return 1;
47916 + }
47917 +
47918 + if (retmode & GR_PTRACERD) {
47919 + switch (request) {
47920 + case PTRACE_POKETEXT:
47921 + case PTRACE_POKEDATA:
47922 + case PTRACE_POKEUSR:
47923 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
47924 + case PTRACE_SETREGS:
47925 + case PTRACE_SETFPREGS:
47926 +#endif
47927 +#ifdef CONFIG_X86
47928 + case PTRACE_SETFPXREGS:
47929 +#endif
47930 +#ifdef CONFIG_ALTIVEC
47931 + case PTRACE_SETVRREGS:
47932 +#endif
47933 + return 1;
47934 + default:
47935 + return 0;
47936 + }
47937 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
47938 + !(current->role->roletype & GR_ROLE_GOD) &&
47939 + (current->acl != task->acl)) {
47940 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47941 + return 1;
47942 + }
47943 +
47944 + return 0;
47945 +}
47946 +
47947 +static int is_writable_mmap(const struct file *filp)
47948 +{
47949 + struct task_struct *task = current;
47950 + struct acl_object_label *obj, *obj2;
47951 +
47952 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
47953 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
47954 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47955 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
47956 + task->role->root_label);
47957 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
47958 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
47959 + return 1;
47960 + }
47961 + }
47962 + return 0;
47963 +}
47964 +
47965 +int
47966 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
47967 +{
47968 + __u32 mode;
47969 +
47970 + if (unlikely(!file || !(prot & PROT_EXEC)))
47971 + return 1;
47972 +
47973 + if (is_writable_mmap(file))
47974 + return 0;
47975 +
47976 + mode =
47977 + gr_search_file(file->f_path.dentry,
47978 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47979 + file->f_path.mnt);
47980 +
47981 + if (!gr_tpe_allow(file))
47982 + return 0;
47983 +
47984 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47985 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47986 + return 0;
47987 + } else if (unlikely(!(mode & GR_EXEC))) {
47988 + return 0;
47989 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47990 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47991 + return 1;
47992 + }
47993 +
47994 + return 1;
47995 +}
47996 +
47997 +int
47998 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47999 +{
48000 + __u32 mode;
48001 +
48002 + if (unlikely(!file || !(prot & PROT_EXEC)))
48003 + return 1;
48004 +
48005 + if (is_writable_mmap(file))
48006 + return 0;
48007 +
48008 + mode =
48009 + gr_search_file(file->f_path.dentry,
48010 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48011 + file->f_path.mnt);
48012 +
48013 + if (!gr_tpe_allow(file))
48014 + return 0;
48015 +
48016 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48017 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48018 + return 0;
48019 + } else if (unlikely(!(mode & GR_EXEC))) {
48020 + return 0;
48021 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48022 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48023 + return 1;
48024 + }
48025 +
48026 + return 1;
48027 +}
48028 +
48029 +void
48030 +gr_acl_handle_psacct(struct task_struct *task, const long code)
48031 +{
48032 + unsigned long runtime;
48033 + unsigned long cputime;
48034 + unsigned int wday, cday;
48035 + __u8 whr, chr;
48036 + __u8 wmin, cmin;
48037 + __u8 wsec, csec;
48038 + struct timespec timeval;
48039 +
48040 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
48041 + !(task->acl->mode & GR_PROCACCT)))
48042 + return;
48043 +
48044 + do_posix_clock_monotonic_gettime(&timeval);
48045 + runtime = timeval.tv_sec - task->start_time.tv_sec;
48046 + wday = runtime / (3600 * 24);
48047 + runtime -= wday * (3600 * 24);
48048 + whr = runtime / 3600;
48049 + runtime -= whr * 3600;
48050 + wmin = runtime / 60;
48051 + runtime -= wmin * 60;
48052 + wsec = runtime;
48053 +
48054 + cputime = (task->utime + task->stime) / HZ;
48055 + cday = cputime / (3600 * 24);
48056 + cputime -= cday * (3600 * 24);
48057 + chr = cputime / 3600;
48058 + cputime -= chr * 3600;
48059 + cmin = cputime / 60;
48060 + cputime -= cmin * 60;
48061 + csec = cputime;
48062 +
48063 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
48064 +
48065 + return;
48066 +}
48067 +
48068 +void gr_set_kernel_label(struct task_struct *task)
48069 +{
48070 + if (gr_status & GR_READY) {
48071 + task->role = kernel_role;
48072 + task->acl = kernel_role->root_label;
48073 + }
48074 + return;
48075 +}
48076 +
48077 +#ifdef CONFIG_TASKSTATS
48078 +int gr_is_taskstats_denied(int pid)
48079 +{
48080 + struct task_struct *task;
48081 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48082 + const struct cred *cred;
48083 +#endif
48084 + int ret = 0;
48085 +
48086 + /* restrict taskstats viewing to un-chrooted root users
48087 + who have the 'view' subject flag if the RBAC system is enabled
48088 + */
48089 +
48090 + rcu_read_lock();
48091 + read_lock(&tasklist_lock);
48092 + task = find_task_by_vpid(pid);
48093 + if (task) {
48094 +#ifdef CONFIG_GRKERNSEC_CHROOT
48095 + if (proc_is_chrooted(task))
48096 + ret = -EACCES;
48097 +#endif
48098 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48099 + cred = __task_cred(task);
48100 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48101 + if (cred->uid != 0)
48102 + ret = -EACCES;
48103 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48104 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
48105 + ret = -EACCES;
48106 +#endif
48107 +#endif
48108 + if (gr_status & GR_READY) {
48109 + if (!(task->acl->mode & GR_VIEW))
48110 + ret = -EACCES;
48111 + }
48112 + } else
48113 + ret = -ENOENT;
48114 +
48115 + read_unlock(&tasklist_lock);
48116 + rcu_read_unlock();
48117 +
48118 + return ret;
48119 +}
48120 +#endif
48121 +
48122 +/* AUXV entries are filled via a descendant of search_binary_handler
48123 + after we've already applied the subject for the target
48124 +*/
48125 +int gr_acl_enable_at_secure(void)
48126 +{
48127 + if (unlikely(!(gr_status & GR_READY)))
48128 + return 0;
48129 +
48130 + if (current->acl->mode & GR_ATSECURE)
48131 + return 1;
48132 +
48133 + return 0;
48134 +}
48135 +
48136 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
48137 +{
48138 + struct task_struct *task = current;
48139 + struct dentry *dentry = file->f_path.dentry;
48140 + struct vfsmount *mnt = file->f_path.mnt;
48141 + struct acl_object_label *obj, *tmp;
48142 + struct acl_subject_label *subj;
48143 + unsigned int bufsize;
48144 + int is_not_root;
48145 + char *path;
48146 + dev_t dev = __get_dev(dentry);
48147 +
48148 + if (unlikely(!(gr_status & GR_READY)))
48149 + return 1;
48150 +
48151 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48152 + return 1;
48153 +
48154 + /* ignore Eric Biederman */
48155 + if (IS_PRIVATE(dentry->d_inode))
48156 + return 1;
48157 +
48158 + subj = task->acl;
48159 + do {
48160 + obj = lookup_acl_obj_label(ino, dev, subj);
48161 + if (obj != NULL)
48162 + return (obj->mode & GR_FIND) ? 1 : 0;
48163 + } while ((subj = subj->parent_subject));
48164 +
48165 + /* this is purely an optimization since we're looking for an object
48166 + for the directory we're doing a readdir on
48167 + if it's possible for any globbed object to match the entry we're
48168 + filling into the directory, then the object we find here will be
48169 + an anchor point with attached globbed objects
48170 + */
48171 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
48172 + if (obj->globbed == NULL)
48173 + return (obj->mode & GR_FIND) ? 1 : 0;
48174 +
48175 + is_not_root = ((obj->filename[0] == '/') &&
48176 + (obj->filename[1] == '\0')) ? 0 : 1;
48177 + bufsize = PAGE_SIZE - namelen - is_not_root;
48178 +
48179 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
48180 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
48181 + return 1;
48182 +
48183 + preempt_disable();
48184 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48185 + bufsize);
48186 +
48187 + bufsize = strlen(path);
48188 +
48189 + /* if base is "/", don't append an additional slash */
48190 + if (is_not_root)
48191 + *(path + bufsize) = '/';
48192 + memcpy(path + bufsize + is_not_root, name, namelen);
48193 + *(path + bufsize + namelen + is_not_root) = '\0';
48194 +
48195 + tmp = obj->globbed;
48196 + while (tmp) {
48197 + if (!glob_match(tmp->filename, path)) {
48198 + preempt_enable();
48199 + return (tmp->mode & GR_FIND) ? 1 : 0;
48200 + }
48201 + tmp = tmp->next;
48202 + }
48203 + preempt_enable();
48204 + return (obj->mode & GR_FIND) ? 1 : 0;
48205 +}
48206 +
48207 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
48208 +EXPORT_SYMBOL(gr_acl_is_enabled);
48209 +#endif
48210 +EXPORT_SYMBOL(gr_learn_resource);
48211 +EXPORT_SYMBOL(gr_set_kernel_label);
48212 +#ifdef CONFIG_SECURITY
48213 +EXPORT_SYMBOL(gr_check_user_change);
48214 +EXPORT_SYMBOL(gr_check_group_change);
48215 +#endif
48216 +
48217 diff -urNp linux-3.0.4/grsecurity/gracl_cap.c linux-3.0.4/grsecurity/gracl_cap.c
48218 --- linux-3.0.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
48219 +++ linux-3.0.4/grsecurity/gracl_cap.c 2011-08-23 21:48:14.000000000 -0400
48220 @@ -0,0 +1,139 @@
48221 +#include <linux/kernel.h>
48222 +#include <linux/module.h>
48223 +#include <linux/sched.h>
48224 +#include <linux/gracl.h>
48225 +#include <linux/grsecurity.h>
48226 +#include <linux/grinternal.h>
48227 +
48228 +static const char *captab_log[] = {
48229 + "CAP_CHOWN",
48230 + "CAP_DAC_OVERRIDE",
48231 + "CAP_DAC_READ_SEARCH",
48232 + "CAP_FOWNER",
48233 + "CAP_FSETID",
48234 + "CAP_KILL",
48235 + "CAP_SETGID",
48236 + "CAP_SETUID",
48237 + "CAP_SETPCAP",
48238 + "CAP_LINUX_IMMUTABLE",
48239 + "CAP_NET_BIND_SERVICE",
48240 + "CAP_NET_BROADCAST",
48241 + "CAP_NET_ADMIN",
48242 + "CAP_NET_RAW",
48243 + "CAP_IPC_LOCK",
48244 + "CAP_IPC_OWNER",
48245 + "CAP_SYS_MODULE",
48246 + "CAP_SYS_RAWIO",
48247 + "CAP_SYS_CHROOT",
48248 + "CAP_SYS_PTRACE",
48249 + "CAP_SYS_PACCT",
48250 + "CAP_SYS_ADMIN",
48251 + "CAP_SYS_BOOT",
48252 + "CAP_SYS_NICE",
48253 + "CAP_SYS_RESOURCE",
48254 + "CAP_SYS_TIME",
48255 + "CAP_SYS_TTY_CONFIG",
48256 + "CAP_MKNOD",
48257 + "CAP_LEASE",
48258 + "CAP_AUDIT_WRITE",
48259 + "CAP_AUDIT_CONTROL",
48260 + "CAP_SETFCAP",
48261 + "CAP_MAC_OVERRIDE",
48262 + "CAP_MAC_ADMIN",
48263 + "CAP_SYSLOG"
48264 +};
48265 +
48266 +EXPORT_SYMBOL(gr_is_capable);
48267 +EXPORT_SYMBOL(gr_is_capable_nolog);
48268 +
48269 +int
48270 +gr_is_capable(const int cap)
48271 +{
48272 + struct task_struct *task = current;
48273 + const struct cred *cred = current_cred();
48274 + struct acl_subject_label *curracl;
48275 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48276 + kernel_cap_t cap_audit = __cap_empty_set;
48277 +
48278 + if (!gr_acl_is_enabled())
48279 + return 1;
48280 +
48281 + curracl = task->acl;
48282 +
48283 + cap_drop = curracl->cap_lower;
48284 + cap_mask = curracl->cap_mask;
48285 + cap_audit = curracl->cap_invert_audit;
48286 +
48287 + while ((curracl = curracl->parent_subject)) {
48288 + /* if the cap isn't specified in the current computed mask but is specified in the
48289 + current level subject, and is lowered in the current level subject, then add
48290 + it to the set of dropped capabilities
48291 + otherwise, add the current level subject's mask to the current computed mask
48292 + */
48293 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48294 + cap_raise(cap_mask, cap);
48295 + if (cap_raised(curracl->cap_lower, cap))
48296 + cap_raise(cap_drop, cap);
48297 + if (cap_raised(curracl->cap_invert_audit, cap))
48298 + cap_raise(cap_audit, cap);
48299 + }
48300 + }
48301 +
48302 + if (!cap_raised(cap_drop, cap)) {
48303 + if (cap_raised(cap_audit, cap))
48304 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
48305 + return 1;
48306 + }
48307 +
48308 + curracl = task->acl;
48309 +
48310 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
48311 + && cap_raised(cred->cap_effective, cap)) {
48312 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48313 + task->role->roletype, cred->uid,
48314 + cred->gid, task->exec_file ?
48315 + gr_to_filename(task->exec_file->f_path.dentry,
48316 + task->exec_file->f_path.mnt) : curracl->filename,
48317 + curracl->filename, 0UL,
48318 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
48319 + return 1;
48320 + }
48321 +
48322 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
48323 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
48324 + return 0;
48325 +}
48326 +
48327 +int
48328 +gr_is_capable_nolog(const int cap)
48329 +{
48330 + struct acl_subject_label *curracl;
48331 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48332 +
48333 + if (!gr_acl_is_enabled())
48334 + return 1;
48335 +
48336 + curracl = current->acl;
48337 +
48338 + cap_drop = curracl->cap_lower;
48339 + cap_mask = curracl->cap_mask;
48340 +
48341 + while ((curracl = curracl->parent_subject)) {
48342 + /* if the cap isn't specified in the current computed mask but is specified in the
48343 + current level subject, and is lowered in the current level subject, then add
48344 + it to the set of dropped capabilities
48345 + otherwise, add the current level subject's mask to the current computed mask
48346 + */
48347 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48348 + cap_raise(cap_mask, cap);
48349 + if (cap_raised(curracl->cap_lower, cap))
48350 + cap_raise(cap_drop, cap);
48351 + }
48352 + }
48353 +
48354 + if (!cap_raised(cap_drop, cap))
48355 + return 1;
48356 +
48357 + return 0;
48358 +}
48359 +
48360 diff -urNp linux-3.0.4/grsecurity/gracl_fs.c linux-3.0.4/grsecurity/gracl_fs.c
48361 --- linux-3.0.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
48362 +++ linux-3.0.4/grsecurity/gracl_fs.c 2011-08-23 21:48:14.000000000 -0400
48363 @@ -0,0 +1,431 @@
48364 +#include <linux/kernel.h>
48365 +#include <linux/sched.h>
48366 +#include <linux/types.h>
48367 +#include <linux/fs.h>
48368 +#include <linux/file.h>
48369 +#include <linux/stat.h>
48370 +#include <linux/grsecurity.h>
48371 +#include <linux/grinternal.h>
48372 +#include <linux/gracl.h>
48373 +
48374 +__u32
48375 +gr_acl_handle_hidden_file(const struct dentry * dentry,
48376 + const struct vfsmount * mnt)
48377 +{
48378 + __u32 mode;
48379 +
48380 + if (unlikely(!dentry->d_inode))
48381 + return GR_FIND;
48382 +
48383 + mode =
48384 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
48385 +
48386 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
48387 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48388 + return mode;
48389 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
48390 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48391 + return 0;
48392 + } else if (unlikely(!(mode & GR_FIND)))
48393 + return 0;
48394 +
48395 + return GR_FIND;
48396 +}
48397 +
48398 +__u32
48399 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
48400 + const int fmode)
48401 +{
48402 + __u32 reqmode = GR_FIND;
48403 + __u32 mode;
48404 +
48405 + if (unlikely(!dentry->d_inode))
48406 + return reqmode;
48407 +
48408 + if (unlikely(fmode & O_APPEND))
48409 + reqmode |= GR_APPEND;
48410 + else if (unlikely(fmode & FMODE_WRITE))
48411 + reqmode |= GR_WRITE;
48412 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48413 + reqmode |= GR_READ;
48414 + if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
48415 + reqmode &= ~GR_READ;
48416 + mode =
48417 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48418 + mnt);
48419 +
48420 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48421 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48422 + reqmode & GR_READ ? " reading" : "",
48423 + reqmode & GR_WRITE ? " writing" : reqmode &
48424 + GR_APPEND ? " appending" : "");
48425 + return reqmode;
48426 + } else
48427 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48428 + {
48429 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48430 + reqmode & GR_READ ? " reading" : "",
48431 + reqmode & GR_WRITE ? " writing" : reqmode &
48432 + GR_APPEND ? " appending" : "");
48433 + return 0;
48434 + } else if (unlikely((mode & reqmode) != reqmode))
48435 + return 0;
48436 +
48437 + return reqmode;
48438 +}
48439 +
48440 +__u32
48441 +gr_acl_handle_creat(const struct dentry * dentry,
48442 + const struct dentry * p_dentry,
48443 + const struct vfsmount * p_mnt, const int fmode,
48444 + const int imode)
48445 +{
48446 + __u32 reqmode = GR_WRITE | GR_CREATE;
48447 + __u32 mode;
48448 +
48449 + if (unlikely(fmode & O_APPEND))
48450 + reqmode |= GR_APPEND;
48451 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48452 + reqmode |= GR_READ;
48453 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48454 + reqmode |= GR_SETID;
48455 +
48456 + mode =
48457 + gr_check_create(dentry, p_dentry, p_mnt,
48458 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48459 +
48460 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48461 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48462 + reqmode & GR_READ ? " reading" : "",
48463 + reqmode & GR_WRITE ? " writing" : reqmode &
48464 + GR_APPEND ? " appending" : "");
48465 + return reqmode;
48466 + } else
48467 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48468 + {
48469 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48470 + reqmode & GR_READ ? " reading" : "",
48471 + reqmode & GR_WRITE ? " writing" : reqmode &
48472 + GR_APPEND ? " appending" : "");
48473 + return 0;
48474 + } else if (unlikely((mode & reqmode) != reqmode))
48475 + return 0;
48476 +
48477 + return reqmode;
48478 +}
48479 +
48480 +__u32
48481 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48482 + const int fmode)
48483 +{
48484 + __u32 mode, reqmode = GR_FIND;
48485 +
48486 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48487 + reqmode |= GR_EXEC;
48488 + if (fmode & S_IWOTH)
48489 + reqmode |= GR_WRITE;
48490 + if (fmode & S_IROTH)
48491 + reqmode |= GR_READ;
48492 +
48493 + mode =
48494 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48495 + mnt);
48496 +
48497 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48498 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48499 + reqmode & GR_READ ? " reading" : "",
48500 + reqmode & GR_WRITE ? " writing" : "",
48501 + reqmode & GR_EXEC ? " executing" : "");
48502 + return reqmode;
48503 + } else
48504 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48505 + {
48506 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48507 + reqmode & GR_READ ? " reading" : "",
48508 + reqmode & GR_WRITE ? " writing" : "",
48509 + reqmode & GR_EXEC ? " executing" : "");
48510 + return 0;
48511 + } else if (unlikely((mode & reqmode) != reqmode))
48512 + return 0;
48513 +
48514 + return reqmode;
48515 +}
48516 +
48517 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
48518 +{
48519 + __u32 mode;
48520 +
48521 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
48522 +
48523 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48524 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
48525 + return mode;
48526 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48527 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48528 + return 0;
48529 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48530 + return 0;
48531 +
48532 + return (reqmode);
48533 +}
48534 +
48535 +__u32
48536 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48537 +{
48538 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
48539 +}
48540 +
48541 +__u32
48542 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48543 +{
48544 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
48545 +}
48546 +
48547 +__u32
48548 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48549 +{
48550 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48551 +}
48552 +
48553 +__u32
48554 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48555 +{
48556 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48557 +}
48558 +
48559 +__u32
48560 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48561 + mode_t mode)
48562 +{
48563 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48564 + return 1;
48565 +
48566 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48567 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48568 + GR_FCHMOD_ACL_MSG);
48569 + } else {
48570 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48571 + }
48572 +}
48573 +
48574 +__u32
48575 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48576 + mode_t mode)
48577 +{
48578 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48579 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48580 + GR_CHMOD_ACL_MSG);
48581 + } else {
48582 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
48583 + }
48584 +}
48585 +
48586 +__u32
48587 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48588 +{
48589 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48590 +}
48591 +
48592 +__u32
48593 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48594 +{
48595 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48596 +}
48597 +
48598 +__u32
48599 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48600 +{
48601 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48602 +}
48603 +
48604 +__u32
48605 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48606 +{
48607 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48608 + GR_UNIXCONNECT_ACL_MSG);
48609 +}
48610 +
48611 +/* hardlinks require at minimum create permission,
48612 + any additional privilege required is based on the
48613 + privilege of the file being linked to
48614 +*/
48615 +__u32
48616 +gr_acl_handle_link(const struct dentry * new_dentry,
48617 + const struct dentry * parent_dentry,
48618 + const struct vfsmount * parent_mnt,
48619 + const struct dentry * old_dentry,
48620 + const struct vfsmount * old_mnt, const char *to)
48621 +{
48622 + __u32 mode;
48623 + __u32 needmode = GR_CREATE | GR_LINK;
48624 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
48625 +
48626 + mode =
48627 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
48628 + old_mnt);
48629 +
48630 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
48631 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48632 + return mode;
48633 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48634 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48635 + return 0;
48636 + } else if (unlikely((mode & needmode) != needmode))
48637 + return 0;
48638 +
48639 + return 1;
48640 +}
48641 +
48642 +__u32
48643 +gr_acl_handle_symlink(const struct dentry * new_dentry,
48644 + const struct dentry * parent_dentry,
48645 + const struct vfsmount * parent_mnt, const char *from)
48646 +{
48647 + __u32 needmode = GR_WRITE | GR_CREATE;
48648 + __u32 mode;
48649 +
48650 + mode =
48651 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
48652 + GR_CREATE | GR_AUDIT_CREATE |
48653 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
48654 +
48655 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
48656 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48657 + return mode;
48658 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48659 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48660 + return 0;
48661 + } else if (unlikely((mode & needmode) != needmode))
48662 + return 0;
48663 +
48664 + return (GR_WRITE | GR_CREATE);
48665 +}
48666 +
48667 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
48668 +{
48669 + __u32 mode;
48670 +
48671 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48672 +
48673 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48674 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
48675 + return mode;
48676 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48677 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
48678 + return 0;
48679 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48680 + return 0;
48681 +
48682 + return (reqmode);
48683 +}
48684 +
48685 +__u32
48686 +gr_acl_handle_mknod(const struct dentry * new_dentry,
48687 + const struct dentry * parent_dentry,
48688 + const struct vfsmount * parent_mnt,
48689 + const int mode)
48690 +{
48691 + __u32 reqmode = GR_WRITE | GR_CREATE;
48692 + if (unlikely(mode & (S_ISUID | S_ISGID)))
48693 + reqmode |= GR_SETID;
48694 +
48695 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48696 + reqmode, GR_MKNOD_ACL_MSG);
48697 +}
48698 +
48699 +__u32
48700 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
48701 + const struct dentry *parent_dentry,
48702 + const struct vfsmount *parent_mnt)
48703 +{
48704 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48705 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
48706 +}
48707 +
48708 +#define RENAME_CHECK_SUCCESS(old, new) \
48709 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
48710 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
48711 +
48712 +int
48713 +gr_acl_handle_rename(struct dentry *new_dentry,
48714 + struct dentry *parent_dentry,
48715 + const struct vfsmount *parent_mnt,
48716 + struct dentry *old_dentry,
48717 + struct inode *old_parent_inode,
48718 + struct vfsmount *old_mnt, const char *newname)
48719 +{
48720 + __u32 comp1, comp2;
48721 + int error = 0;
48722 +
48723 + if (unlikely(!gr_acl_is_enabled()))
48724 + return 0;
48725 +
48726 + if (!new_dentry->d_inode) {
48727 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
48728 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
48729 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
48730 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
48731 + GR_DELETE | GR_AUDIT_DELETE |
48732 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48733 + GR_SUPPRESS, old_mnt);
48734 + } else {
48735 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
48736 + GR_CREATE | GR_DELETE |
48737 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
48738 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48739 + GR_SUPPRESS, parent_mnt);
48740 + comp2 =
48741 + gr_search_file(old_dentry,
48742 + GR_READ | GR_WRITE | GR_AUDIT_READ |
48743 + GR_DELETE | GR_AUDIT_DELETE |
48744 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
48745 + }
48746 +
48747 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
48748 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
48749 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48750 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
48751 + && !(comp2 & GR_SUPPRESS)) {
48752 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48753 + error = -EACCES;
48754 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
48755 + error = -EACCES;
48756 +
48757 + return error;
48758 +}
48759 +
48760 +void
48761 +gr_acl_handle_exit(void)
48762 +{
48763 + u16 id;
48764 + char *rolename;
48765 + struct file *exec_file;
48766 +
48767 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
48768 + !(current->role->roletype & GR_ROLE_PERSIST))) {
48769 + id = current->acl_role_id;
48770 + rolename = current->role->rolename;
48771 + gr_set_acls(1);
48772 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
48773 + }
48774 +
48775 + write_lock(&grsec_exec_file_lock);
48776 + exec_file = current->exec_file;
48777 + current->exec_file = NULL;
48778 + write_unlock(&grsec_exec_file_lock);
48779 +
48780 + if (exec_file)
48781 + fput(exec_file);
48782 +}
48783 +
48784 +int
48785 +gr_acl_handle_procpidmem(const struct task_struct *task)
48786 +{
48787 + if (unlikely(!gr_acl_is_enabled()))
48788 + return 0;
48789 +
48790 + if (task != current && task->acl->mode & GR_PROTPROCFD)
48791 + return -EACCES;
48792 +
48793 + return 0;
48794 +}
48795 diff -urNp linux-3.0.4/grsecurity/gracl_ip.c linux-3.0.4/grsecurity/gracl_ip.c
48796 --- linux-3.0.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
48797 +++ linux-3.0.4/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
48798 @@ -0,0 +1,381 @@
48799 +#include <linux/kernel.h>
48800 +#include <asm/uaccess.h>
48801 +#include <asm/errno.h>
48802 +#include <net/sock.h>
48803 +#include <linux/file.h>
48804 +#include <linux/fs.h>
48805 +#include <linux/net.h>
48806 +#include <linux/in.h>
48807 +#include <linux/skbuff.h>
48808 +#include <linux/ip.h>
48809 +#include <linux/udp.h>
48810 +#include <linux/types.h>
48811 +#include <linux/sched.h>
48812 +#include <linux/netdevice.h>
48813 +#include <linux/inetdevice.h>
48814 +#include <linux/gracl.h>
48815 +#include <linux/grsecurity.h>
48816 +#include <linux/grinternal.h>
48817 +
48818 +#define GR_BIND 0x01
48819 +#define GR_CONNECT 0x02
48820 +#define GR_INVERT 0x04
48821 +#define GR_BINDOVERRIDE 0x08
48822 +#define GR_CONNECTOVERRIDE 0x10
48823 +#define GR_SOCK_FAMILY 0x20
48824 +
48825 +static const char * gr_protocols[IPPROTO_MAX] = {
48826 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
48827 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
48828 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
48829 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
48830 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
48831 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
48832 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
48833 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
48834 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
48835 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
48836 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
48837 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
48838 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
48839 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
48840 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
48841 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
48842 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
48843 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
48844 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
48845 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
48846 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
48847 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
48848 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
48849 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
48850 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
48851 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
48852 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
48853 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
48854 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
48855 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
48856 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
48857 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
48858 + };
48859 +
48860 +static const char * gr_socktypes[SOCK_MAX] = {
48861 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
48862 + "unknown:7", "unknown:8", "unknown:9", "packet"
48863 + };
48864 +
48865 +static const char * gr_sockfamilies[AF_MAX+1] = {
48866 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
48867 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
48868 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
48869 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
48870 + };
48871 +
48872 +const char *
48873 +gr_proto_to_name(unsigned char proto)
48874 +{
48875 + return gr_protocols[proto];
48876 +}
48877 +
48878 +const char *
48879 +gr_socktype_to_name(unsigned char type)
48880 +{
48881 + return gr_socktypes[type];
48882 +}
48883 +
48884 +const char *
48885 +gr_sockfamily_to_name(unsigned char family)
48886 +{
48887 + return gr_sockfamilies[family];
48888 +}
48889 +
48890 +int
48891 +gr_search_socket(const int domain, const int type, const int protocol)
48892 +{
48893 + struct acl_subject_label *curr;
48894 + const struct cred *cred = current_cred();
48895 +
48896 + if (unlikely(!gr_acl_is_enabled()))
48897 + goto exit;
48898 +
48899 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
48900 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
48901 + goto exit; // let the kernel handle it
48902 +
48903 + curr = current->acl;
48904 +
48905 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
48906 + /* the family is allowed, if this is PF_INET allow it only if
48907 + the extra sock type/protocol checks pass */
48908 + if (domain == PF_INET)
48909 + goto inet_check;
48910 + goto exit;
48911 + } else {
48912 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48913 + __u32 fakeip = 0;
48914 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48915 + current->role->roletype, cred->uid,
48916 + cred->gid, current->exec_file ?
48917 + gr_to_filename(current->exec_file->f_path.dentry,
48918 + current->exec_file->f_path.mnt) :
48919 + curr->filename, curr->filename,
48920 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
48921 + &current->signal->saved_ip);
48922 + goto exit;
48923 + }
48924 + goto exit_fail;
48925 + }
48926 +
48927 +inet_check:
48928 + /* the rest of this checking is for IPv4 only */
48929 + if (!curr->ips)
48930 + goto exit;
48931 +
48932 + if ((curr->ip_type & (1 << type)) &&
48933 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
48934 + goto exit;
48935 +
48936 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48937 + /* we don't place acls on raw sockets , and sometimes
48938 + dgram/ip sockets are opened for ioctl and not
48939 + bind/connect, so we'll fake a bind learn log */
48940 + if (type == SOCK_RAW || type == SOCK_PACKET) {
48941 + __u32 fakeip = 0;
48942 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48943 + current->role->roletype, cred->uid,
48944 + cred->gid, current->exec_file ?
48945 + gr_to_filename(current->exec_file->f_path.dentry,
48946 + current->exec_file->f_path.mnt) :
48947 + curr->filename, curr->filename,
48948 + &fakeip, 0, type,
48949 + protocol, GR_CONNECT, &current->signal->saved_ip);
48950 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
48951 + __u32 fakeip = 0;
48952 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48953 + current->role->roletype, cred->uid,
48954 + cred->gid, current->exec_file ?
48955 + gr_to_filename(current->exec_file->f_path.dentry,
48956 + current->exec_file->f_path.mnt) :
48957 + curr->filename, curr->filename,
48958 + &fakeip, 0, type,
48959 + protocol, GR_BIND, &current->signal->saved_ip);
48960 + }
48961 + /* we'll log when they use connect or bind */
48962 + goto exit;
48963 + }
48964 +
48965 +exit_fail:
48966 + if (domain == PF_INET)
48967 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
48968 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
48969 + else
48970 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
48971 + gr_socktype_to_name(type), protocol);
48972 +
48973 + return 0;
48974 +exit:
48975 + return 1;
48976 +}
48977 +
48978 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
48979 +{
48980 + if ((ip->mode & mode) &&
48981 + (ip_port >= ip->low) &&
48982 + (ip_port <= ip->high) &&
48983 + ((ntohl(ip_addr) & our_netmask) ==
48984 + (ntohl(our_addr) & our_netmask))
48985 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
48986 + && (ip->type & (1 << type))) {
48987 + if (ip->mode & GR_INVERT)
48988 + return 2; // specifically denied
48989 + else
48990 + return 1; // allowed
48991 + }
48992 +
48993 + return 0; // not specifically allowed, may continue parsing
48994 +}
48995 +
48996 +static int
48997 +gr_search_connectbind(const int full_mode, struct sock *sk,
48998 + struct sockaddr_in *addr, const int type)
48999 +{
49000 + char iface[IFNAMSIZ] = {0};
49001 + struct acl_subject_label *curr;
49002 + struct acl_ip_label *ip;
49003 + struct inet_sock *isk;
49004 + struct net_device *dev;
49005 + struct in_device *idev;
49006 + unsigned long i;
49007 + int ret;
49008 + int mode = full_mode & (GR_BIND | GR_CONNECT);
49009 + __u32 ip_addr = 0;
49010 + __u32 our_addr;
49011 + __u32 our_netmask;
49012 + char *p;
49013 + __u16 ip_port = 0;
49014 + const struct cred *cred = current_cred();
49015 +
49016 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
49017 + return 0;
49018 +
49019 + curr = current->acl;
49020 + isk = inet_sk(sk);
49021 +
49022 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
49023 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
49024 + addr->sin_addr.s_addr = curr->inaddr_any_override;
49025 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
49026 + struct sockaddr_in saddr;
49027 + int err;
49028 +
49029 + saddr.sin_family = AF_INET;
49030 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
49031 + saddr.sin_port = isk->inet_sport;
49032 +
49033 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49034 + if (err)
49035 + return err;
49036 +
49037 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49038 + if (err)
49039 + return err;
49040 + }
49041 +
49042 + if (!curr->ips)
49043 + return 0;
49044 +
49045 + ip_addr = addr->sin_addr.s_addr;
49046 + ip_port = ntohs(addr->sin_port);
49047 +
49048 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49049 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49050 + current->role->roletype, cred->uid,
49051 + cred->gid, current->exec_file ?
49052 + gr_to_filename(current->exec_file->f_path.dentry,
49053 + current->exec_file->f_path.mnt) :
49054 + curr->filename, curr->filename,
49055 + &ip_addr, ip_port, type,
49056 + sk->sk_protocol, mode, &current->signal->saved_ip);
49057 + return 0;
49058 + }
49059 +
49060 + for (i = 0; i < curr->ip_num; i++) {
49061 + ip = *(curr->ips + i);
49062 + if (ip->iface != NULL) {
49063 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
49064 + p = strchr(iface, ':');
49065 + if (p != NULL)
49066 + *p = '\0';
49067 + dev = dev_get_by_name(sock_net(sk), iface);
49068 + if (dev == NULL)
49069 + continue;
49070 + idev = in_dev_get(dev);
49071 + if (idev == NULL) {
49072 + dev_put(dev);
49073 + continue;
49074 + }
49075 + rcu_read_lock();
49076 + for_ifa(idev) {
49077 + if (!strcmp(ip->iface, ifa->ifa_label)) {
49078 + our_addr = ifa->ifa_address;
49079 + our_netmask = 0xffffffff;
49080 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49081 + if (ret == 1) {
49082 + rcu_read_unlock();
49083 + in_dev_put(idev);
49084 + dev_put(dev);
49085 + return 0;
49086 + } else if (ret == 2) {
49087 + rcu_read_unlock();
49088 + in_dev_put(idev);
49089 + dev_put(dev);
49090 + goto denied;
49091 + }
49092 + }
49093 + } endfor_ifa(idev);
49094 + rcu_read_unlock();
49095 + in_dev_put(idev);
49096 + dev_put(dev);
49097 + } else {
49098 + our_addr = ip->addr;
49099 + our_netmask = ip->netmask;
49100 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49101 + if (ret == 1)
49102 + return 0;
49103 + else if (ret == 2)
49104 + goto denied;
49105 + }
49106 + }
49107 +
49108 +denied:
49109 + if (mode == GR_BIND)
49110 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49111 + else if (mode == GR_CONNECT)
49112 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49113 +
49114 + return -EACCES;
49115 +}
49116 +
49117 +int
49118 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
49119 +{
49120 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
49121 +}
49122 +
49123 +int
49124 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
49125 +{
49126 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
49127 +}
49128 +
49129 +int gr_search_listen(struct socket *sock)
49130 +{
49131 + struct sock *sk = sock->sk;
49132 + struct sockaddr_in addr;
49133 +
49134 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
49135 + addr.sin_port = inet_sk(sk)->inet_sport;
49136 +
49137 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49138 +}
49139 +
49140 +int gr_search_accept(struct socket *sock)
49141 +{
49142 + struct sock *sk = sock->sk;
49143 + struct sockaddr_in addr;
49144 +
49145 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
49146 + addr.sin_port = inet_sk(sk)->inet_sport;
49147 +
49148 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49149 +}
49150 +
49151 +int
49152 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
49153 +{
49154 + if (addr)
49155 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
49156 + else {
49157 + struct sockaddr_in sin;
49158 + const struct inet_sock *inet = inet_sk(sk);
49159 +
49160 + sin.sin_addr.s_addr = inet->inet_daddr;
49161 + sin.sin_port = inet->inet_dport;
49162 +
49163 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49164 + }
49165 +}
49166 +
49167 +int
49168 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
49169 +{
49170 + struct sockaddr_in sin;
49171 +
49172 + if (unlikely(skb->len < sizeof (struct udphdr)))
49173 + return 0; // skip this packet
49174 +
49175 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
49176 + sin.sin_port = udp_hdr(skb)->source;
49177 +
49178 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49179 +}
49180 diff -urNp linux-3.0.4/grsecurity/gracl_learn.c linux-3.0.4/grsecurity/gracl_learn.c
49181 --- linux-3.0.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
49182 +++ linux-3.0.4/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
49183 @@ -0,0 +1,207 @@
49184 +#include <linux/kernel.h>
49185 +#include <linux/mm.h>
49186 +#include <linux/sched.h>
49187 +#include <linux/poll.h>
49188 +#include <linux/string.h>
49189 +#include <linux/file.h>
49190 +#include <linux/types.h>
49191 +#include <linux/vmalloc.h>
49192 +#include <linux/grinternal.h>
49193 +
49194 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
49195 + size_t count, loff_t *ppos);
49196 +extern int gr_acl_is_enabled(void);
49197 +
49198 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
49199 +static int gr_learn_attached;
49200 +
49201 +/* use a 512k buffer */
49202 +#define LEARN_BUFFER_SIZE (512 * 1024)
49203 +
49204 +static DEFINE_SPINLOCK(gr_learn_lock);
49205 +static DEFINE_MUTEX(gr_learn_user_mutex);
49206 +
49207 +/* we need to maintain two buffers, so that the kernel context of grlearn
49208 + uses a semaphore around the userspace copying, and the other kernel contexts
49209 + use a spinlock when copying into the buffer, since they cannot sleep
49210 +*/
49211 +static char *learn_buffer;
49212 +static char *learn_buffer_user;
49213 +static int learn_buffer_len;
49214 +static int learn_buffer_user_len;
49215 +
49216 +static ssize_t
49217 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
49218 +{
49219 + DECLARE_WAITQUEUE(wait, current);
49220 + ssize_t retval = 0;
49221 +
49222 + add_wait_queue(&learn_wait, &wait);
49223 + set_current_state(TASK_INTERRUPTIBLE);
49224 + do {
49225 + mutex_lock(&gr_learn_user_mutex);
49226 + spin_lock(&gr_learn_lock);
49227 + if (learn_buffer_len)
49228 + break;
49229 + spin_unlock(&gr_learn_lock);
49230 + mutex_unlock(&gr_learn_user_mutex);
49231 + if (file->f_flags & O_NONBLOCK) {
49232 + retval = -EAGAIN;
49233 + goto out;
49234 + }
49235 + if (signal_pending(current)) {
49236 + retval = -ERESTARTSYS;
49237 + goto out;
49238 + }
49239 +
49240 + schedule();
49241 + } while (1);
49242 +
49243 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
49244 + learn_buffer_user_len = learn_buffer_len;
49245 + retval = learn_buffer_len;
49246 + learn_buffer_len = 0;
49247 +
49248 + spin_unlock(&gr_learn_lock);
49249 +
49250 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
49251 + retval = -EFAULT;
49252 +
49253 + mutex_unlock(&gr_learn_user_mutex);
49254 +out:
49255 + set_current_state(TASK_RUNNING);
49256 + remove_wait_queue(&learn_wait, &wait);
49257 + return retval;
49258 +}
49259 +
49260 +static unsigned int
49261 +poll_learn(struct file * file, poll_table * wait)
49262 +{
49263 + poll_wait(file, &learn_wait, wait);
49264 +
49265 + if (learn_buffer_len)
49266 + return (POLLIN | POLLRDNORM);
49267 +
49268 + return 0;
49269 +}
49270 +
49271 +void
49272 +gr_clear_learn_entries(void)
49273 +{
49274 + char *tmp;
49275 +
49276 + mutex_lock(&gr_learn_user_mutex);
49277 + spin_lock(&gr_learn_lock);
49278 + tmp = learn_buffer;
49279 + learn_buffer = NULL;
49280 + spin_unlock(&gr_learn_lock);
49281 + if (tmp)
49282 + vfree(tmp);
49283 + if (learn_buffer_user != NULL) {
49284 + vfree(learn_buffer_user);
49285 + learn_buffer_user = NULL;
49286 + }
49287 + learn_buffer_len = 0;
49288 + mutex_unlock(&gr_learn_user_mutex);
49289 +
49290 + return;
49291 +}
49292 +
49293 +void
49294 +gr_add_learn_entry(const char *fmt, ...)
49295 +{
49296 + va_list args;
49297 + unsigned int len;
49298 +
49299 + if (!gr_learn_attached)
49300 + return;
49301 +
49302 + spin_lock(&gr_learn_lock);
49303 +
49304 + /* leave a gap at the end so we know when it's "full" but don't have to
49305 + compute the exact length of the string we're trying to append
49306 + */
49307 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
49308 + spin_unlock(&gr_learn_lock);
49309 + wake_up_interruptible(&learn_wait);
49310 + return;
49311 + }
49312 + if (learn_buffer == NULL) {
49313 + spin_unlock(&gr_learn_lock);
49314 + return;
49315 + }
49316 +
49317 + va_start(args, fmt);
49318 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
49319 + va_end(args);
49320 +
49321 + learn_buffer_len += len + 1;
49322 +
49323 + spin_unlock(&gr_learn_lock);
49324 + wake_up_interruptible(&learn_wait);
49325 +
49326 + return;
49327 +}
49328 +
49329 +static int
49330 +open_learn(struct inode *inode, struct file *file)
49331 +{
49332 + if (file->f_mode & FMODE_READ && gr_learn_attached)
49333 + return -EBUSY;
49334 + if (file->f_mode & FMODE_READ) {
49335 + int retval = 0;
49336 + mutex_lock(&gr_learn_user_mutex);
49337 + if (learn_buffer == NULL)
49338 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
49339 + if (learn_buffer_user == NULL)
49340 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
49341 + if (learn_buffer == NULL) {
49342 + retval = -ENOMEM;
49343 + goto out_error;
49344 + }
49345 + if (learn_buffer_user == NULL) {
49346 + retval = -ENOMEM;
49347 + goto out_error;
49348 + }
49349 + learn_buffer_len = 0;
49350 + learn_buffer_user_len = 0;
49351 + gr_learn_attached = 1;
49352 +out_error:
49353 + mutex_unlock(&gr_learn_user_mutex);
49354 + return retval;
49355 + }
49356 + return 0;
49357 +}
49358 +
49359 +static int
49360 +close_learn(struct inode *inode, struct file *file)
49361 +{
49362 + if (file->f_mode & FMODE_READ) {
49363 + char *tmp = NULL;
49364 + mutex_lock(&gr_learn_user_mutex);
49365 + spin_lock(&gr_learn_lock);
49366 + tmp = learn_buffer;
49367 + learn_buffer = NULL;
49368 + spin_unlock(&gr_learn_lock);
49369 + if (tmp)
49370 + vfree(tmp);
49371 + if (learn_buffer_user != NULL) {
49372 + vfree(learn_buffer_user);
49373 + learn_buffer_user = NULL;
49374 + }
49375 + learn_buffer_len = 0;
49376 + learn_buffer_user_len = 0;
49377 + gr_learn_attached = 0;
49378 + mutex_unlock(&gr_learn_user_mutex);
49379 + }
49380 +
49381 + return 0;
49382 +}
49383 +
49384 +const struct file_operations grsec_fops = {
49385 + .read = read_learn,
49386 + .write = write_grsec_handler,
49387 + .open = open_learn,
49388 + .release = close_learn,
49389 + .poll = poll_learn,
49390 +};
49391 diff -urNp linux-3.0.4/grsecurity/gracl_res.c linux-3.0.4/grsecurity/gracl_res.c
49392 --- linux-3.0.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
49393 +++ linux-3.0.4/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
49394 @@ -0,0 +1,68 @@
49395 +#include <linux/kernel.h>
49396 +#include <linux/sched.h>
49397 +#include <linux/gracl.h>
49398 +#include <linux/grinternal.h>
49399 +
49400 +static const char *restab_log[] = {
49401 + [RLIMIT_CPU] = "RLIMIT_CPU",
49402 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
49403 + [RLIMIT_DATA] = "RLIMIT_DATA",
49404 + [RLIMIT_STACK] = "RLIMIT_STACK",
49405 + [RLIMIT_CORE] = "RLIMIT_CORE",
49406 + [RLIMIT_RSS] = "RLIMIT_RSS",
49407 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
49408 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
49409 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
49410 + [RLIMIT_AS] = "RLIMIT_AS",
49411 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
49412 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
49413 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
49414 + [RLIMIT_NICE] = "RLIMIT_NICE",
49415 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49416 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49417 + [GR_CRASH_RES] = "RLIMIT_CRASH"
49418 +};
49419 +
49420 +void
49421 +gr_log_resource(const struct task_struct *task,
49422 + const int res, const unsigned long wanted, const int gt)
49423 +{
49424 + const struct cred *cred;
49425 + unsigned long rlim;
49426 +
49427 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
49428 + return;
49429 +
49430 + // not yet supported resource
49431 + if (unlikely(!restab_log[res]))
49432 + return;
49433 +
49434 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49435 + rlim = task_rlimit_max(task, res);
49436 + else
49437 + rlim = task_rlimit(task, res);
49438 +
49439 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49440 + return;
49441 +
49442 + rcu_read_lock();
49443 + cred = __task_cred(task);
49444 +
49445 + if (res == RLIMIT_NPROC &&
49446 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
49447 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49448 + goto out_rcu_unlock;
49449 + else if (res == RLIMIT_MEMLOCK &&
49450 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49451 + goto out_rcu_unlock;
49452 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49453 + goto out_rcu_unlock;
49454 + rcu_read_unlock();
49455 +
49456 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49457 +
49458 + return;
49459 +out_rcu_unlock:
49460 + rcu_read_unlock();
49461 + return;
49462 +}
49463 diff -urNp linux-3.0.4/grsecurity/gracl_segv.c linux-3.0.4/grsecurity/gracl_segv.c
49464 --- linux-3.0.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49465 +++ linux-3.0.4/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
49466 @@ -0,0 +1,299 @@
49467 +#include <linux/kernel.h>
49468 +#include <linux/mm.h>
49469 +#include <asm/uaccess.h>
49470 +#include <asm/errno.h>
49471 +#include <asm/mman.h>
49472 +#include <net/sock.h>
49473 +#include <linux/file.h>
49474 +#include <linux/fs.h>
49475 +#include <linux/net.h>
49476 +#include <linux/in.h>
49477 +#include <linux/slab.h>
49478 +#include <linux/types.h>
49479 +#include <linux/sched.h>
49480 +#include <linux/timer.h>
49481 +#include <linux/gracl.h>
49482 +#include <linux/grsecurity.h>
49483 +#include <linux/grinternal.h>
49484 +
49485 +static struct crash_uid *uid_set;
49486 +static unsigned short uid_used;
49487 +static DEFINE_SPINLOCK(gr_uid_lock);
49488 +extern rwlock_t gr_inode_lock;
49489 +extern struct acl_subject_label *
49490 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49491 + struct acl_role_label *role);
49492 +
49493 +#ifdef CONFIG_BTRFS_FS
49494 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
49495 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
49496 +#endif
49497 +
49498 +static inline dev_t __get_dev(const struct dentry *dentry)
49499 +{
49500 +#ifdef CONFIG_BTRFS_FS
49501 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
49502 + return get_btrfs_dev_from_inode(dentry->d_inode);
49503 + else
49504 +#endif
49505 + return dentry->d_inode->i_sb->s_dev;
49506 +}
49507 +
49508 +int
49509 +gr_init_uidset(void)
49510 +{
49511 + uid_set =
49512 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
49513 + uid_used = 0;
49514 +
49515 + return uid_set ? 1 : 0;
49516 +}
49517 +
49518 +void
49519 +gr_free_uidset(void)
49520 +{
49521 + if (uid_set)
49522 + kfree(uid_set);
49523 +
49524 + return;
49525 +}
49526 +
49527 +int
49528 +gr_find_uid(const uid_t uid)
49529 +{
49530 + struct crash_uid *tmp = uid_set;
49531 + uid_t buid;
49532 + int low = 0, high = uid_used - 1, mid;
49533 +
49534 + while (high >= low) {
49535 + mid = (low + high) >> 1;
49536 + buid = tmp[mid].uid;
49537 + if (buid == uid)
49538 + return mid;
49539 + if (buid > uid)
49540 + high = mid - 1;
49541 + if (buid < uid)
49542 + low = mid + 1;
49543 + }
49544 +
49545 + return -1;
49546 +}
49547 +
49548 +static __inline__ void
49549 +gr_insertsort(void)
49550 +{
49551 + unsigned short i, j;
49552 + struct crash_uid index;
49553 +
49554 + for (i = 1; i < uid_used; i++) {
49555 + index = uid_set[i];
49556 + j = i;
49557 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49558 + uid_set[j] = uid_set[j - 1];
49559 + j--;
49560 + }
49561 + uid_set[j] = index;
49562 + }
49563 +
49564 + return;
49565 +}
49566 +
49567 +static __inline__ void
49568 +gr_insert_uid(const uid_t uid, const unsigned long expires)
49569 +{
49570 + int loc;
49571 +
49572 + if (uid_used == GR_UIDTABLE_MAX)
49573 + return;
49574 +
49575 + loc = gr_find_uid(uid);
49576 +
49577 + if (loc >= 0) {
49578 + uid_set[loc].expires = expires;
49579 + return;
49580 + }
49581 +
49582 + uid_set[uid_used].uid = uid;
49583 + uid_set[uid_used].expires = expires;
49584 + uid_used++;
49585 +
49586 + gr_insertsort();
49587 +
49588 + return;
49589 +}
49590 +
49591 +void
49592 +gr_remove_uid(const unsigned short loc)
49593 +{
49594 + unsigned short i;
49595 +
49596 + for (i = loc + 1; i < uid_used; i++)
49597 + uid_set[i - 1] = uid_set[i];
49598 +
49599 + uid_used--;
49600 +
49601 + return;
49602 +}
49603 +
49604 +int
49605 +gr_check_crash_uid(const uid_t uid)
49606 +{
49607 + int loc;
49608 + int ret = 0;
49609 +
49610 + if (unlikely(!gr_acl_is_enabled()))
49611 + return 0;
49612 +
49613 + spin_lock(&gr_uid_lock);
49614 + loc = gr_find_uid(uid);
49615 +
49616 + if (loc < 0)
49617 + goto out_unlock;
49618 +
49619 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
49620 + gr_remove_uid(loc);
49621 + else
49622 + ret = 1;
49623 +
49624 +out_unlock:
49625 + spin_unlock(&gr_uid_lock);
49626 + return ret;
49627 +}
49628 +
49629 +static __inline__ int
49630 +proc_is_setxid(const struct cred *cred)
49631 +{
49632 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
49633 + cred->uid != cred->fsuid)
49634 + return 1;
49635 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
49636 + cred->gid != cred->fsgid)
49637 + return 1;
49638 +
49639 + return 0;
49640 +}
49641 +
49642 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
49643 +
49644 +void
49645 +gr_handle_crash(struct task_struct *task, const int sig)
49646 +{
49647 + struct acl_subject_label *curr;
49648 + struct acl_subject_label *curr2;
49649 + struct task_struct *tsk, *tsk2;
49650 + const struct cred *cred;
49651 + const struct cred *cred2;
49652 +
49653 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
49654 + return;
49655 +
49656 + if (unlikely(!gr_acl_is_enabled()))
49657 + return;
49658 +
49659 + curr = task->acl;
49660 +
49661 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
49662 + return;
49663 +
49664 + if (time_before_eq(curr->expires, get_seconds())) {
49665 + curr->expires = 0;
49666 + curr->crashes = 0;
49667 + }
49668 +
49669 + curr->crashes++;
49670 +
49671 + if (!curr->expires)
49672 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
49673 +
49674 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49675 + time_after(curr->expires, get_seconds())) {
49676 + rcu_read_lock();
49677 + cred = __task_cred(task);
49678 + if (cred->uid && proc_is_setxid(cred)) {
49679 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49680 + spin_lock(&gr_uid_lock);
49681 + gr_insert_uid(cred->uid, curr->expires);
49682 + spin_unlock(&gr_uid_lock);
49683 + curr->expires = 0;
49684 + curr->crashes = 0;
49685 + read_lock(&tasklist_lock);
49686 + do_each_thread(tsk2, tsk) {
49687 + cred2 = __task_cred(tsk);
49688 + if (tsk != task && cred2->uid == cred->uid)
49689 + gr_fake_force_sig(SIGKILL, tsk);
49690 + } while_each_thread(tsk2, tsk);
49691 + read_unlock(&tasklist_lock);
49692 + } else {
49693 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49694 + read_lock(&tasklist_lock);
49695 + do_each_thread(tsk2, tsk) {
49696 + if (likely(tsk != task)) {
49697 + curr2 = tsk->acl;
49698 +
49699 + if (curr2->device == curr->device &&
49700 + curr2->inode == curr->inode)
49701 + gr_fake_force_sig(SIGKILL, tsk);
49702 + }
49703 + } while_each_thread(tsk2, tsk);
49704 + read_unlock(&tasklist_lock);
49705 + }
49706 + rcu_read_unlock();
49707 + }
49708 +
49709 + return;
49710 +}
49711 +
49712 +int
49713 +gr_check_crash_exec(const struct file *filp)
49714 +{
49715 + struct acl_subject_label *curr;
49716 +
49717 + if (unlikely(!gr_acl_is_enabled()))
49718 + return 0;
49719 +
49720 + read_lock(&gr_inode_lock);
49721 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
49722 + __get_dev(filp->f_path.dentry),
49723 + current->role);
49724 + read_unlock(&gr_inode_lock);
49725 +
49726 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
49727 + (!curr->crashes && !curr->expires))
49728 + return 0;
49729 +
49730 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49731 + time_after(curr->expires, get_seconds()))
49732 + return 1;
49733 + else if (time_before_eq(curr->expires, get_seconds())) {
49734 + curr->crashes = 0;
49735 + curr->expires = 0;
49736 + }
49737 +
49738 + return 0;
49739 +}
49740 +
49741 +void
49742 +gr_handle_alertkill(struct task_struct *task)
49743 +{
49744 + struct acl_subject_label *curracl;
49745 + __u32 curr_ip;
49746 + struct task_struct *p, *p2;
49747 +
49748 + if (unlikely(!gr_acl_is_enabled()))
49749 + return;
49750 +
49751 + curracl = task->acl;
49752 + curr_ip = task->signal->curr_ip;
49753 +
49754 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
49755 + read_lock(&tasklist_lock);
49756 + do_each_thread(p2, p) {
49757 + if (p->signal->curr_ip == curr_ip)
49758 + gr_fake_force_sig(SIGKILL, p);
49759 + } while_each_thread(p2, p);
49760 + read_unlock(&tasklist_lock);
49761 + } else if (curracl->mode & GR_KILLPROC)
49762 + gr_fake_force_sig(SIGKILL, task);
49763 +
49764 + return;
49765 +}
49766 diff -urNp linux-3.0.4/grsecurity/gracl_shm.c linux-3.0.4/grsecurity/gracl_shm.c
49767 --- linux-3.0.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
49768 +++ linux-3.0.4/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
49769 @@ -0,0 +1,40 @@
49770 +#include <linux/kernel.h>
49771 +#include <linux/mm.h>
49772 +#include <linux/sched.h>
49773 +#include <linux/file.h>
49774 +#include <linux/ipc.h>
49775 +#include <linux/gracl.h>
49776 +#include <linux/grsecurity.h>
49777 +#include <linux/grinternal.h>
49778 +
49779 +int
49780 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49781 + const time_t shm_createtime, const uid_t cuid, const int shmid)
49782 +{
49783 + struct task_struct *task;
49784 +
49785 + if (!gr_acl_is_enabled())
49786 + return 1;
49787 +
49788 + rcu_read_lock();
49789 + read_lock(&tasklist_lock);
49790 +
49791 + task = find_task_by_vpid(shm_cprid);
49792 +
49793 + if (unlikely(!task))
49794 + task = find_task_by_vpid(shm_lapid);
49795 +
49796 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
49797 + (task->pid == shm_lapid)) &&
49798 + (task->acl->mode & GR_PROTSHM) &&
49799 + (task->acl != current->acl))) {
49800 + read_unlock(&tasklist_lock);
49801 + rcu_read_unlock();
49802 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
49803 + return 0;
49804 + }
49805 + read_unlock(&tasklist_lock);
49806 + rcu_read_unlock();
49807 +
49808 + return 1;
49809 +}
49810 diff -urNp linux-3.0.4/grsecurity/grsec_chdir.c linux-3.0.4/grsecurity/grsec_chdir.c
49811 --- linux-3.0.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
49812 +++ linux-3.0.4/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
49813 @@ -0,0 +1,19 @@
49814 +#include <linux/kernel.h>
49815 +#include <linux/sched.h>
49816 +#include <linux/fs.h>
49817 +#include <linux/file.h>
49818 +#include <linux/grsecurity.h>
49819 +#include <linux/grinternal.h>
49820 +
49821 +void
49822 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
49823 +{
49824 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49825 + if ((grsec_enable_chdir && grsec_enable_group &&
49826 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
49827 + !grsec_enable_group)) {
49828 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
49829 + }
49830 +#endif
49831 + return;
49832 +}
49833 diff -urNp linux-3.0.4/grsecurity/grsec_chroot.c linux-3.0.4/grsecurity/grsec_chroot.c
49834 --- linux-3.0.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
49835 +++ linux-3.0.4/grsecurity/grsec_chroot.c 2011-08-23 21:48:14.000000000 -0400
49836 @@ -0,0 +1,349 @@
49837 +#include <linux/kernel.h>
49838 +#include <linux/module.h>
49839 +#include <linux/sched.h>
49840 +#include <linux/file.h>
49841 +#include <linux/fs.h>
49842 +#include <linux/mount.h>
49843 +#include <linux/types.h>
49844 +#include <linux/pid_namespace.h>
49845 +#include <linux/grsecurity.h>
49846 +#include <linux/grinternal.h>
49847 +
49848 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
49849 +{
49850 +#ifdef CONFIG_GRKERNSEC
49851 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
49852 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
49853 + task->gr_is_chrooted = 1;
49854 + else
49855 + task->gr_is_chrooted = 0;
49856 +
49857 + task->gr_chroot_dentry = path->dentry;
49858 +#endif
49859 + return;
49860 +}
49861 +
49862 +void gr_clear_chroot_entries(struct task_struct *task)
49863 +{
49864 +#ifdef CONFIG_GRKERNSEC
49865 + task->gr_is_chrooted = 0;
49866 + task->gr_chroot_dentry = NULL;
49867 +#endif
49868 + return;
49869 +}
49870 +
49871 +int
49872 +gr_handle_chroot_unix(const pid_t pid)
49873 +{
49874 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49875 + struct task_struct *p;
49876 +
49877 + if (unlikely(!grsec_enable_chroot_unix))
49878 + return 1;
49879 +
49880 + if (likely(!proc_is_chrooted(current)))
49881 + return 1;
49882 +
49883 + rcu_read_lock();
49884 + read_lock(&tasklist_lock);
49885 + p = find_task_by_vpid_unrestricted(pid);
49886 + if (unlikely(p && !have_same_root(current, p))) {
49887 + read_unlock(&tasklist_lock);
49888 + rcu_read_unlock();
49889 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
49890 + return 0;
49891 + }
49892 + read_unlock(&tasklist_lock);
49893 + rcu_read_unlock();
49894 +#endif
49895 + return 1;
49896 +}
49897 +
49898 +int
49899 +gr_handle_chroot_nice(void)
49900 +{
49901 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49902 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
49903 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
49904 + return -EPERM;
49905 + }
49906 +#endif
49907 + return 0;
49908 +}
49909 +
49910 +int
49911 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
49912 +{
49913 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49914 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
49915 + && proc_is_chrooted(current)) {
49916 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
49917 + return -EACCES;
49918 + }
49919 +#endif
49920 + return 0;
49921 +}
49922 +
49923 +int
49924 +gr_handle_chroot_rawio(const struct inode *inode)
49925 +{
49926 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49927 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49928 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
49929 + return 1;
49930 +#endif
49931 + return 0;
49932 +}
49933 +
49934 +int
49935 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
49936 +{
49937 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49938 + struct task_struct *p;
49939 + int ret = 0;
49940 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
49941 + return ret;
49942 +
49943 + read_lock(&tasklist_lock);
49944 + do_each_pid_task(pid, type, p) {
49945 + if (!have_same_root(current, p)) {
49946 + ret = 1;
49947 + goto out;
49948 + }
49949 + } while_each_pid_task(pid, type, p);
49950 +out:
49951 + read_unlock(&tasklist_lock);
49952 + return ret;
49953 +#endif
49954 + return 0;
49955 +}
49956 +
49957 +int
49958 +gr_pid_is_chrooted(struct task_struct *p)
49959 +{
49960 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49961 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
49962 + return 0;
49963 +
49964 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
49965 + !have_same_root(current, p)) {
49966 + return 1;
49967 + }
49968 +#endif
49969 + return 0;
49970 +}
49971 +
49972 +EXPORT_SYMBOL(gr_pid_is_chrooted);
49973 +
49974 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
49975 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
49976 +{
49977 + struct path path, currentroot;
49978 + int ret = 0;
49979 +
49980 + path.dentry = (struct dentry *)u_dentry;
49981 + path.mnt = (struct vfsmount *)u_mnt;
49982 + get_fs_root(current->fs, &currentroot);
49983 + if (path_is_under(&path, &currentroot))
49984 + ret = 1;
49985 + path_put(&currentroot);
49986 +
49987 + return ret;
49988 +}
49989 +#endif
49990 +
49991 +int
49992 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
49993 +{
49994 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49995 + if (!grsec_enable_chroot_fchdir)
49996 + return 1;
49997 +
49998 + if (!proc_is_chrooted(current))
49999 + return 1;
50000 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
50001 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
50002 + return 0;
50003 + }
50004 +#endif
50005 + return 1;
50006 +}
50007 +
50008 +int
50009 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50010 + const time_t shm_createtime)
50011 +{
50012 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50013 + struct task_struct *p;
50014 + time_t starttime;
50015 +
50016 + if (unlikely(!grsec_enable_chroot_shmat))
50017 + return 1;
50018 +
50019 + if (likely(!proc_is_chrooted(current)))
50020 + return 1;
50021 +
50022 + rcu_read_lock();
50023 + read_lock(&tasklist_lock);
50024 +
50025 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
50026 + starttime = p->start_time.tv_sec;
50027 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
50028 + if (have_same_root(current, p)) {
50029 + goto allow;
50030 + } else {
50031 + read_unlock(&tasklist_lock);
50032 + rcu_read_unlock();
50033 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50034 + return 0;
50035 + }
50036 + }
50037 + /* creator exited, pid reuse, fall through to next check */
50038 + }
50039 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
50040 + if (unlikely(!have_same_root(current, p))) {
50041 + read_unlock(&tasklist_lock);
50042 + rcu_read_unlock();
50043 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50044 + return 0;
50045 + }
50046 + }
50047 +
50048 +allow:
50049 + read_unlock(&tasklist_lock);
50050 + rcu_read_unlock();
50051 +#endif
50052 + return 1;
50053 +}
50054 +
50055 +void
50056 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
50057 +{
50058 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50059 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
50060 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
50061 +#endif
50062 + return;
50063 +}
50064 +
50065 +int
50066 +gr_handle_chroot_mknod(const struct dentry *dentry,
50067 + const struct vfsmount *mnt, const int mode)
50068 +{
50069 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50070 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
50071 + proc_is_chrooted(current)) {
50072 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
50073 + return -EPERM;
50074 + }
50075 +#endif
50076 + return 0;
50077 +}
50078 +
50079 +int
50080 +gr_handle_chroot_mount(const struct dentry *dentry,
50081 + const struct vfsmount *mnt, const char *dev_name)
50082 +{
50083 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50084 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
50085 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
50086 + return -EPERM;
50087 + }
50088 +#endif
50089 + return 0;
50090 +}
50091 +
50092 +int
50093 +gr_handle_chroot_pivot(void)
50094 +{
50095 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50096 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
50097 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
50098 + return -EPERM;
50099 + }
50100 +#endif
50101 + return 0;
50102 +}
50103 +
50104 +int
50105 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
50106 +{
50107 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50108 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
50109 + !gr_is_outside_chroot(dentry, mnt)) {
50110 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
50111 + return -EPERM;
50112 + }
50113 +#endif
50114 + return 0;
50115 +}
50116 +
50117 +int
50118 +gr_handle_chroot_caps(struct path *path)
50119 +{
50120 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50121 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
50122 + (init_task.fs->root.dentry != path->dentry) &&
50123 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
50124 +
50125 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
50126 + const struct cred *old = current_cred();
50127 + struct cred *new = prepare_creds();
50128 + if (new == NULL)
50129 + return 1;
50130 +
50131 + new->cap_permitted = cap_drop(old->cap_permitted,
50132 + chroot_caps);
50133 + new->cap_inheritable = cap_drop(old->cap_inheritable,
50134 + chroot_caps);
50135 + new->cap_effective = cap_drop(old->cap_effective,
50136 + chroot_caps);
50137 +
50138 + commit_creds(new);
50139 +
50140 + return 0;
50141 + }
50142 +#endif
50143 + return 0;
50144 +}
50145 +
50146 +int
50147 +gr_handle_chroot_sysctl(const int op)
50148 +{
50149 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50150 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
50151 + proc_is_chrooted(current))
50152 + return -EACCES;
50153 +#endif
50154 + return 0;
50155 +}
50156 +
50157 +void
50158 +gr_handle_chroot_chdir(struct path *path)
50159 +{
50160 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50161 + if (grsec_enable_chroot_chdir)
50162 + set_fs_pwd(current->fs, path);
50163 +#endif
50164 + return;
50165 +}
50166 +
50167 +int
50168 +gr_handle_chroot_chmod(const struct dentry *dentry,
50169 + const struct vfsmount *mnt, const int mode)
50170 +{
50171 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50172 + /* allow chmod +s on directories, but not files */
50173 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
50174 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
50175 + proc_is_chrooted(current)) {
50176 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
50177 + return -EPERM;
50178 + }
50179 +#endif
50180 + return 0;
50181 +}
50182 +
50183 +#ifdef CONFIG_SECURITY
50184 +EXPORT_SYMBOL(gr_handle_chroot_caps);
50185 +#endif
50186 diff -urNp linux-3.0.4/grsecurity/grsec_disabled.c linux-3.0.4/grsecurity/grsec_disabled.c
50187 --- linux-3.0.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
50188 +++ linux-3.0.4/grsecurity/grsec_disabled.c 2011-08-23 21:48:14.000000000 -0400
50189 @@ -0,0 +1,447 @@
50190 +#include <linux/kernel.h>
50191 +#include <linux/module.h>
50192 +#include <linux/sched.h>
50193 +#include <linux/file.h>
50194 +#include <linux/fs.h>
50195 +#include <linux/kdev_t.h>
50196 +#include <linux/net.h>
50197 +#include <linux/in.h>
50198 +#include <linux/ip.h>
50199 +#include <linux/skbuff.h>
50200 +#include <linux/sysctl.h>
50201 +
50202 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50203 +void
50204 +pax_set_initial_flags(struct linux_binprm *bprm)
50205 +{
50206 + return;
50207 +}
50208 +#endif
50209 +
50210 +#ifdef CONFIG_SYSCTL
50211 +__u32
50212 +gr_handle_sysctl(const struct ctl_table * table, const int op)
50213 +{
50214 + return 0;
50215 +}
50216 +#endif
50217 +
50218 +#ifdef CONFIG_TASKSTATS
50219 +int gr_is_taskstats_denied(int pid)
50220 +{
50221 + return 0;
50222 +}
50223 +#endif
50224 +
50225 +int
50226 +gr_acl_is_enabled(void)
50227 +{
50228 + return 0;
50229 +}
50230 +
50231 +int
50232 +gr_handle_rawio(const struct inode *inode)
50233 +{
50234 + return 0;
50235 +}
50236 +
50237 +void
50238 +gr_acl_handle_psacct(struct task_struct *task, const long code)
50239 +{
50240 + return;
50241 +}
50242 +
50243 +int
50244 +gr_handle_ptrace(struct task_struct *task, const long request)
50245 +{
50246 + return 0;
50247 +}
50248 +
50249 +int
50250 +gr_handle_proc_ptrace(struct task_struct *task)
50251 +{
50252 + return 0;
50253 +}
50254 +
50255 +void
50256 +gr_learn_resource(const struct task_struct *task,
50257 + const int res, const unsigned long wanted, const int gt)
50258 +{
50259 + return;
50260 +}
50261 +
50262 +int
50263 +gr_set_acls(const int type)
50264 +{
50265 + return 0;
50266 +}
50267 +
50268 +int
50269 +gr_check_hidden_task(const struct task_struct *tsk)
50270 +{
50271 + return 0;
50272 +}
50273 +
50274 +int
50275 +gr_check_protected_task(const struct task_struct *task)
50276 +{
50277 + return 0;
50278 +}
50279 +
50280 +int
50281 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50282 +{
50283 + return 0;
50284 +}
50285 +
50286 +void
50287 +gr_copy_label(struct task_struct *tsk)
50288 +{
50289 + return;
50290 +}
50291 +
50292 +void
50293 +gr_set_pax_flags(struct task_struct *task)
50294 +{
50295 + return;
50296 +}
50297 +
50298 +int
50299 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50300 + const int unsafe_share)
50301 +{
50302 + return 0;
50303 +}
50304 +
50305 +void
50306 +gr_handle_delete(const ino_t ino, const dev_t dev)
50307 +{
50308 + return;
50309 +}
50310 +
50311 +void
50312 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50313 +{
50314 + return;
50315 +}
50316 +
50317 +void
50318 +gr_handle_crash(struct task_struct *task, const int sig)
50319 +{
50320 + return;
50321 +}
50322 +
50323 +int
50324 +gr_check_crash_exec(const struct file *filp)
50325 +{
50326 + return 0;
50327 +}
50328 +
50329 +int
50330 +gr_check_crash_uid(const uid_t uid)
50331 +{
50332 + return 0;
50333 +}
50334 +
50335 +void
50336 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50337 + struct dentry *old_dentry,
50338 + struct dentry *new_dentry,
50339 + struct vfsmount *mnt, const __u8 replace)
50340 +{
50341 + return;
50342 +}
50343 +
50344 +int
50345 +gr_search_socket(const int family, const int type, const int protocol)
50346 +{
50347 + return 1;
50348 +}
50349 +
50350 +int
50351 +gr_search_connectbind(const int mode, const struct socket *sock,
50352 + const struct sockaddr_in *addr)
50353 +{
50354 + return 0;
50355 +}
50356 +
50357 +int
50358 +gr_is_capable(const int cap)
50359 +{
50360 + return 1;
50361 +}
50362 +
50363 +int
50364 +gr_is_capable_nolog(const int cap)
50365 +{
50366 + return 1;
50367 +}
50368 +
50369 +void
50370 +gr_handle_alertkill(struct task_struct *task)
50371 +{
50372 + return;
50373 +}
50374 +
50375 +__u32
50376 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
50377 +{
50378 + return 1;
50379 +}
50380 +
50381 +__u32
50382 +gr_acl_handle_hidden_file(const struct dentry * dentry,
50383 + const struct vfsmount * mnt)
50384 +{
50385 + return 1;
50386 +}
50387 +
50388 +__u32
50389 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50390 + const int fmode)
50391 +{
50392 + return 1;
50393 +}
50394 +
50395 +__u32
50396 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50397 +{
50398 + return 1;
50399 +}
50400 +
50401 +__u32
50402 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50403 +{
50404 + return 1;
50405 +}
50406 +
50407 +int
50408 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50409 + unsigned int *vm_flags)
50410 +{
50411 + return 1;
50412 +}
50413 +
50414 +__u32
50415 +gr_acl_handle_truncate(const struct dentry * dentry,
50416 + const struct vfsmount * mnt)
50417 +{
50418 + return 1;
50419 +}
50420 +
50421 +__u32
50422 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50423 +{
50424 + return 1;
50425 +}
50426 +
50427 +__u32
50428 +gr_acl_handle_access(const struct dentry * dentry,
50429 + const struct vfsmount * mnt, const int fmode)
50430 +{
50431 + return 1;
50432 +}
50433 +
50434 +__u32
50435 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50436 + mode_t mode)
50437 +{
50438 + return 1;
50439 +}
50440 +
50441 +__u32
50442 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50443 + mode_t mode)
50444 +{
50445 + return 1;
50446 +}
50447 +
50448 +__u32
50449 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50450 +{
50451 + return 1;
50452 +}
50453 +
50454 +__u32
50455 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50456 +{
50457 + return 1;
50458 +}
50459 +
50460 +void
50461 +grsecurity_init(void)
50462 +{
50463 + return;
50464 +}
50465 +
50466 +__u32
50467 +gr_acl_handle_mknod(const struct dentry * new_dentry,
50468 + const struct dentry * parent_dentry,
50469 + const struct vfsmount * parent_mnt,
50470 + const int mode)
50471 +{
50472 + return 1;
50473 +}
50474 +
50475 +__u32
50476 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
50477 + const struct dentry * parent_dentry,
50478 + const struct vfsmount * parent_mnt)
50479 +{
50480 + return 1;
50481 +}
50482 +
50483 +__u32
50484 +gr_acl_handle_symlink(const struct dentry * new_dentry,
50485 + const struct dentry * parent_dentry,
50486 + const struct vfsmount * parent_mnt, const char *from)
50487 +{
50488 + return 1;
50489 +}
50490 +
50491 +__u32
50492 +gr_acl_handle_link(const struct dentry * new_dentry,
50493 + const struct dentry * parent_dentry,
50494 + const struct vfsmount * parent_mnt,
50495 + const struct dentry * old_dentry,
50496 + const struct vfsmount * old_mnt, const char *to)
50497 +{
50498 + return 1;
50499 +}
50500 +
50501 +int
50502 +gr_acl_handle_rename(const struct dentry *new_dentry,
50503 + const struct dentry *parent_dentry,
50504 + const struct vfsmount *parent_mnt,
50505 + const struct dentry *old_dentry,
50506 + const struct inode *old_parent_inode,
50507 + const struct vfsmount *old_mnt, const char *newname)
50508 +{
50509 + return 0;
50510 +}
50511 +
50512 +int
50513 +gr_acl_handle_filldir(const struct file *file, const char *name,
50514 + const int namelen, const ino_t ino)
50515 +{
50516 + return 1;
50517 +}
50518 +
50519 +int
50520 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50521 + const time_t shm_createtime, const uid_t cuid, const int shmid)
50522 +{
50523 + return 1;
50524 +}
50525 +
50526 +int
50527 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50528 +{
50529 + return 0;
50530 +}
50531 +
50532 +int
50533 +gr_search_accept(const struct socket *sock)
50534 +{
50535 + return 0;
50536 +}
50537 +
50538 +int
50539 +gr_search_listen(const struct socket *sock)
50540 +{
50541 + return 0;
50542 +}
50543 +
50544 +int
50545 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50546 +{
50547 + return 0;
50548 +}
50549 +
50550 +__u32
50551 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50552 +{
50553 + return 1;
50554 +}
50555 +
50556 +__u32
50557 +gr_acl_handle_creat(const struct dentry * dentry,
50558 + const struct dentry * p_dentry,
50559 + const struct vfsmount * p_mnt, const int fmode,
50560 + const int imode)
50561 +{
50562 + return 1;
50563 +}
50564 +
50565 +void
50566 +gr_acl_handle_exit(void)
50567 +{
50568 + return;
50569 +}
50570 +
50571 +int
50572 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50573 +{
50574 + return 1;
50575 +}
50576 +
50577 +void
50578 +gr_set_role_label(const uid_t uid, const gid_t gid)
50579 +{
50580 + return;
50581 +}
50582 +
50583 +int
50584 +gr_acl_handle_procpidmem(const struct task_struct *task)
50585 +{
50586 + return 0;
50587 +}
50588 +
50589 +int
50590 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50591 +{
50592 + return 0;
50593 +}
50594 +
50595 +int
50596 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50597 +{
50598 + return 0;
50599 +}
50600 +
50601 +void
50602 +gr_set_kernel_label(struct task_struct *task)
50603 +{
50604 + return;
50605 +}
50606 +
50607 +int
50608 +gr_check_user_change(int real, int effective, int fs)
50609 +{
50610 + return 0;
50611 +}
50612 +
50613 +int
50614 +gr_check_group_change(int real, int effective, int fs)
50615 +{
50616 + return 0;
50617 +}
50618 +
50619 +int gr_acl_enable_at_secure(void)
50620 +{
50621 + return 0;
50622 +}
50623 +
50624 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50625 +{
50626 + return dentry->d_inode->i_sb->s_dev;
50627 +}
50628 +
50629 +EXPORT_SYMBOL(gr_is_capable);
50630 +EXPORT_SYMBOL(gr_is_capable_nolog);
50631 +EXPORT_SYMBOL(gr_learn_resource);
50632 +EXPORT_SYMBOL(gr_set_kernel_label);
50633 +#ifdef CONFIG_SECURITY
50634 +EXPORT_SYMBOL(gr_check_user_change);
50635 +EXPORT_SYMBOL(gr_check_group_change);
50636 +#endif
50637 diff -urNp linux-3.0.4/grsecurity/grsec_exec.c linux-3.0.4/grsecurity/grsec_exec.c
50638 --- linux-3.0.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
50639 +++ linux-3.0.4/grsecurity/grsec_exec.c 2011-08-25 17:25:59.000000000 -0400
50640 @@ -0,0 +1,72 @@
50641 +#include <linux/kernel.h>
50642 +#include <linux/sched.h>
50643 +#include <linux/file.h>
50644 +#include <linux/binfmts.h>
50645 +#include <linux/fs.h>
50646 +#include <linux/types.h>
50647 +#include <linux/grdefs.h>
50648 +#include <linux/grsecurity.h>
50649 +#include <linux/grinternal.h>
50650 +#include <linux/capability.h>
50651 +
50652 +#include <asm/uaccess.h>
50653 +
50654 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50655 +static char gr_exec_arg_buf[132];
50656 +static DEFINE_MUTEX(gr_exec_arg_mutex);
50657 +#endif
50658 +
50659 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
50660 +
50661 +void
50662 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
50663 +{
50664 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50665 + char *grarg = gr_exec_arg_buf;
50666 + unsigned int i, x, execlen = 0;
50667 + char c;
50668 +
50669 + if (!((grsec_enable_execlog && grsec_enable_group &&
50670 + in_group_p(grsec_audit_gid))
50671 + || (grsec_enable_execlog && !grsec_enable_group)))
50672 + return;
50673 +
50674 + mutex_lock(&gr_exec_arg_mutex);
50675 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50676 +
50677 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50678 + const char __user *p;
50679 + unsigned int len;
50680 +
50681 + p = get_user_arg_ptr(argv, i);
50682 + if (IS_ERR(p))
50683 + goto log;
50684 +
50685 + len = strnlen_user(p, 128 - execlen);
50686 + if (len > 128 - execlen)
50687 + len = 128 - execlen;
50688 + else if (len > 0)
50689 + len--;
50690 + if (copy_from_user(grarg + execlen, p, len))
50691 + goto log;
50692 +
50693 + /* rewrite unprintable characters */
50694 + for (x = 0; x < len; x++) {
50695 + c = *(grarg + execlen + x);
50696 + if (c < 32 || c > 126)
50697 + *(grarg + execlen + x) = ' ';
50698 + }
50699 +
50700 + execlen += len;
50701 + *(grarg + execlen) = ' ';
50702 + *(grarg + execlen + 1) = '\0';
50703 + execlen++;
50704 + }
50705 +
50706 + log:
50707 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50708 + bprm->file->f_path.mnt, grarg);
50709 + mutex_unlock(&gr_exec_arg_mutex);
50710 +#endif
50711 + return;
50712 +}
50713 diff -urNp linux-3.0.4/grsecurity/grsec_fifo.c linux-3.0.4/grsecurity/grsec_fifo.c
50714 --- linux-3.0.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
50715 +++ linux-3.0.4/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
50716 @@ -0,0 +1,24 @@
50717 +#include <linux/kernel.h>
50718 +#include <linux/sched.h>
50719 +#include <linux/fs.h>
50720 +#include <linux/file.h>
50721 +#include <linux/grinternal.h>
50722 +
50723 +int
50724 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
50725 + const struct dentry *dir, const int flag, const int acc_mode)
50726 +{
50727 +#ifdef CONFIG_GRKERNSEC_FIFO
50728 + const struct cred *cred = current_cred();
50729 +
50730 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
50731 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
50732 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
50733 + (cred->fsuid != dentry->d_inode->i_uid)) {
50734 + if (!inode_permission(dentry->d_inode, acc_mode))
50735 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
50736 + return -EACCES;
50737 + }
50738 +#endif
50739 + return 0;
50740 +}
50741 diff -urNp linux-3.0.4/grsecurity/grsec_fork.c linux-3.0.4/grsecurity/grsec_fork.c
50742 --- linux-3.0.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
50743 +++ linux-3.0.4/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
50744 @@ -0,0 +1,23 @@
50745 +#include <linux/kernel.h>
50746 +#include <linux/sched.h>
50747 +#include <linux/grsecurity.h>
50748 +#include <linux/grinternal.h>
50749 +#include <linux/errno.h>
50750 +
50751 +void
50752 +gr_log_forkfail(const int retval)
50753 +{
50754 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50755 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
50756 + switch (retval) {
50757 + case -EAGAIN:
50758 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
50759 + break;
50760 + case -ENOMEM:
50761 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
50762 + break;
50763 + }
50764 + }
50765 +#endif
50766 + return;
50767 +}
50768 diff -urNp linux-3.0.4/grsecurity/grsec_init.c linux-3.0.4/grsecurity/grsec_init.c
50769 --- linux-3.0.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
50770 +++ linux-3.0.4/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
50771 @@ -0,0 +1,269 @@
50772 +#include <linux/kernel.h>
50773 +#include <linux/sched.h>
50774 +#include <linux/mm.h>
50775 +#include <linux/gracl.h>
50776 +#include <linux/slab.h>
50777 +#include <linux/vmalloc.h>
50778 +#include <linux/percpu.h>
50779 +#include <linux/module.h>
50780 +
50781 +int grsec_enable_brute;
50782 +int grsec_enable_link;
50783 +int grsec_enable_dmesg;
50784 +int grsec_enable_harden_ptrace;
50785 +int grsec_enable_fifo;
50786 +int grsec_enable_execlog;
50787 +int grsec_enable_signal;
50788 +int grsec_enable_forkfail;
50789 +int grsec_enable_audit_ptrace;
50790 +int grsec_enable_time;
50791 +int grsec_enable_audit_textrel;
50792 +int grsec_enable_group;
50793 +int grsec_audit_gid;
50794 +int grsec_enable_chdir;
50795 +int grsec_enable_mount;
50796 +int grsec_enable_rofs;
50797 +int grsec_enable_chroot_findtask;
50798 +int grsec_enable_chroot_mount;
50799 +int grsec_enable_chroot_shmat;
50800 +int grsec_enable_chroot_fchdir;
50801 +int grsec_enable_chroot_double;
50802 +int grsec_enable_chroot_pivot;
50803 +int grsec_enable_chroot_chdir;
50804 +int grsec_enable_chroot_chmod;
50805 +int grsec_enable_chroot_mknod;
50806 +int grsec_enable_chroot_nice;
50807 +int grsec_enable_chroot_execlog;
50808 +int grsec_enable_chroot_caps;
50809 +int grsec_enable_chroot_sysctl;
50810 +int grsec_enable_chroot_unix;
50811 +int grsec_enable_tpe;
50812 +int grsec_tpe_gid;
50813 +int grsec_enable_blackhole;
50814 +#ifdef CONFIG_IPV6_MODULE
50815 +EXPORT_SYMBOL(grsec_enable_blackhole);
50816 +#endif
50817 +int grsec_lastack_retries;
50818 +int grsec_enable_tpe_all;
50819 +int grsec_enable_tpe_invert;
50820 +int grsec_enable_socket_all;
50821 +int grsec_socket_all_gid;
50822 +int grsec_enable_socket_client;
50823 +int grsec_socket_client_gid;
50824 +int grsec_enable_socket_server;
50825 +int grsec_socket_server_gid;
50826 +int grsec_resource_logging;
50827 +int grsec_disable_privio;
50828 +int grsec_enable_log_rwxmaps;
50829 +int grsec_lock;
50830 +
50831 +DEFINE_SPINLOCK(grsec_alert_lock);
50832 +unsigned long grsec_alert_wtime = 0;
50833 +unsigned long grsec_alert_fyet = 0;
50834 +
50835 +DEFINE_SPINLOCK(grsec_audit_lock);
50836 +
50837 +DEFINE_RWLOCK(grsec_exec_file_lock);
50838 +
50839 +char *gr_shared_page[4];
50840 +
50841 +char *gr_alert_log_fmt;
50842 +char *gr_audit_log_fmt;
50843 +char *gr_alert_log_buf;
50844 +char *gr_audit_log_buf;
50845 +
50846 +extern struct gr_arg *gr_usermode;
50847 +extern unsigned char *gr_system_salt;
50848 +extern unsigned char *gr_system_sum;
50849 +
50850 +void __init
50851 +grsecurity_init(void)
50852 +{
50853 + int j;
50854 + /* create the per-cpu shared pages */
50855 +
50856 +#ifdef CONFIG_X86
50857 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
50858 +#endif
50859 +
50860 + for (j = 0; j < 4; j++) {
50861 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
50862 + if (gr_shared_page[j] == NULL) {
50863 + panic("Unable to allocate grsecurity shared page");
50864 + return;
50865 + }
50866 + }
50867 +
50868 + /* allocate log buffers */
50869 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
50870 + if (!gr_alert_log_fmt) {
50871 + panic("Unable to allocate grsecurity alert log format buffer");
50872 + return;
50873 + }
50874 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
50875 + if (!gr_audit_log_fmt) {
50876 + panic("Unable to allocate grsecurity audit log format buffer");
50877 + return;
50878 + }
50879 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50880 + if (!gr_alert_log_buf) {
50881 + panic("Unable to allocate grsecurity alert log buffer");
50882 + return;
50883 + }
50884 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50885 + if (!gr_audit_log_buf) {
50886 + panic("Unable to allocate grsecurity audit log buffer");
50887 + return;
50888 + }
50889 +
50890 + /* allocate memory for authentication structure */
50891 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
50892 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
50893 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
50894 +
50895 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
50896 + panic("Unable to allocate grsecurity authentication structure");
50897 + return;
50898 + }
50899 +
50900 +
50901 +#ifdef CONFIG_GRKERNSEC_IO
50902 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
50903 + grsec_disable_privio = 1;
50904 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50905 + grsec_disable_privio = 1;
50906 +#else
50907 + grsec_disable_privio = 0;
50908 +#endif
50909 +#endif
50910 +
50911 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
50912 + /* for backward compatibility, tpe_invert always defaults to on if
50913 + enabled in the kernel
50914 + */
50915 + grsec_enable_tpe_invert = 1;
50916 +#endif
50917 +
50918 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50919 +#ifndef CONFIG_GRKERNSEC_SYSCTL
50920 + grsec_lock = 1;
50921 +#endif
50922 +
50923 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
50924 + grsec_enable_audit_textrel = 1;
50925 +#endif
50926 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50927 + grsec_enable_log_rwxmaps = 1;
50928 +#endif
50929 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
50930 + grsec_enable_group = 1;
50931 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
50932 +#endif
50933 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50934 + grsec_enable_chdir = 1;
50935 +#endif
50936 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50937 + grsec_enable_harden_ptrace = 1;
50938 +#endif
50939 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50940 + grsec_enable_mount = 1;
50941 +#endif
50942 +#ifdef CONFIG_GRKERNSEC_LINK
50943 + grsec_enable_link = 1;
50944 +#endif
50945 +#ifdef CONFIG_GRKERNSEC_BRUTE
50946 + grsec_enable_brute = 1;
50947 +#endif
50948 +#ifdef CONFIG_GRKERNSEC_DMESG
50949 + grsec_enable_dmesg = 1;
50950 +#endif
50951 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
50952 + grsec_enable_blackhole = 1;
50953 + grsec_lastack_retries = 4;
50954 +#endif
50955 +#ifdef CONFIG_GRKERNSEC_FIFO
50956 + grsec_enable_fifo = 1;
50957 +#endif
50958 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50959 + grsec_enable_execlog = 1;
50960 +#endif
50961 +#ifdef CONFIG_GRKERNSEC_SIGNAL
50962 + grsec_enable_signal = 1;
50963 +#endif
50964 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50965 + grsec_enable_forkfail = 1;
50966 +#endif
50967 +#ifdef CONFIG_GRKERNSEC_TIME
50968 + grsec_enable_time = 1;
50969 +#endif
50970 +#ifdef CONFIG_GRKERNSEC_RESLOG
50971 + grsec_resource_logging = 1;
50972 +#endif
50973 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50974 + grsec_enable_chroot_findtask = 1;
50975 +#endif
50976 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50977 + grsec_enable_chroot_unix = 1;
50978 +#endif
50979 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50980 + grsec_enable_chroot_mount = 1;
50981 +#endif
50982 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50983 + grsec_enable_chroot_fchdir = 1;
50984 +#endif
50985 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50986 + grsec_enable_chroot_shmat = 1;
50987 +#endif
50988 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
50989 + grsec_enable_audit_ptrace = 1;
50990 +#endif
50991 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50992 + grsec_enable_chroot_double = 1;
50993 +#endif
50994 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50995 + grsec_enable_chroot_pivot = 1;
50996 +#endif
50997 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50998 + grsec_enable_chroot_chdir = 1;
50999 +#endif
51000 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51001 + grsec_enable_chroot_chmod = 1;
51002 +#endif
51003 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51004 + grsec_enable_chroot_mknod = 1;
51005 +#endif
51006 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51007 + grsec_enable_chroot_nice = 1;
51008 +#endif
51009 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51010 + grsec_enable_chroot_execlog = 1;
51011 +#endif
51012 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51013 + grsec_enable_chroot_caps = 1;
51014 +#endif
51015 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51016 + grsec_enable_chroot_sysctl = 1;
51017 +#endif
51018 +#ifdef CONFIG_GRKERNSEC_TPE
51019 + grsec_enable_tpe = 1;
51020 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
51021 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
51022 + grsec_enable_tpe_all = 1;
51023 +#endif
51024 +#endif
51025 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51026 + grsec_enable_socket_all = 1;
51027 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
51028 +#endif
51029 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51030 + grsec_enable_socket_client = 1;
51031 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
51032 +#endif
51033 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51034 + grsec_enable_socket_server = 1;
51035 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
51036 +#endif
51037 +#endif
51038 +
51039 + return;
51040 +}
51041 diff -urNp linux-3.0.4/grsecurity/grsec_link.c linux-3.0.4/grsecurity/grsec_link.c
51042 --- linux-3.0.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
51043 +++ linux-3.0.4/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
51044 @@ -0,0 +1,43 @@
51045 +#include <linux/kernel.h>
51046 +#include <linux/sched.h>
51047 +#include <linux/fs.h>
51048 +#include <linux/file.h>
51049 +#include <linux/grinternal.h>
51050 +
51051 +int
51052 +gr_handle_follow_link(const struct inode *parent,
51053 + const struct inode *inode,
51054 + const struct dentry *dentry, const struct vfsmount *mnt)
51055 +{
51056 +#ifdef CONFIG_GRKERNSEC_LINK
51057 + const struct cred *cred = current_cred();
51058 +
51059 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
51060 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
51061 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
51062 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
51063 + return -EACCES;
51064 + }
51065 +#endif
51066 + return 0;
51067 +}
51068 +
51069 +int
51070 +gr_handle_hardlink(const struct dentry *dentry,
51071 + const struct vfsmount *mnt,
51072 + struct inode *inode, const int mode, const char *to)
51073 +{
51074 +#ifdef CONFIG_GRKERNSEC_LINK
51075 + const struct cred *cred = current_cred();
51076 +
51077 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
51078 + (!S_ISREG(mode) || (mode & S_ISUID) ||
51079 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
51080 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
51081 + !capable(CAP_FOWNER) && cred->uid) {
51082 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
51083 + return -EPERM;
51084 + }
51085 +#endif
51086 + return 0;
51087 +}
51088 diff -urNp linux-3.0.4/grsecurity/grsec_log.c linux-3.0.4/grsecurity/grsec_log.c
51089 --- linux-3.0.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
51090 +++ linux-3.0.4/grsecurity/grsec_log.c 2011-08-23 21:48:14.000000000 -0400
51091 @@ -0,0 +1,310 @@
51092 +#include <linux/kernel.h>
51093 +#include <linux/sched.h>
51094 +#include <linux/file.h>
51095 +#include <linux/tty.h>
51096 +#include <linux/fs.h>
51097 +#include <linux/grinternal.h>
51098 +
51099 +#ifdef CONFIG_TREE_PREEMPT_RCU
51100 +#define DISABLE_PREEMPT() preempt_disable()
51101 +#define ENABLE_PREEMPT() preempt_enable()
51102 +#else
51103 +#define DISABLE_PREEMPT()
51104 +#define ENABLE_PREEMPT()
51105 +#endif
51106 +
51107 +#define BEGIN_LOCKS(x) \
51108 + DISABLE_PREEMPT(); \
51109 + rcu_read_lock(); \
51110 + read_lock(&tasklist_lock); \
51111 + read_lock(&grsec_exec_file_lock); \
51112 + if (x != GR_DO_AUDIT) \
51113 + spin_lock(&grsec_alert_lock); \
51114 + else \
51115 + spin_lock(&grsec_audit_lock)
51116 +
51117 +#define END_LOCKS(x) \
51118 + if (x != GR_DO_AUDIT) \
51119 + spin_unlock(&grsec_alert_lock); \
51120 + else \
51121 + spin_unlock(&grsec_audit_lock); \
51122 + read_unlock(&grsec_exec_file_lock); \
51123 + read_unlock(&tasklist_lock); \
51124 + rcu_read_unlock(); \
51125 + ENABLE_PREEMPT(); \
51126 + if (x == GR_DONT_AUDIT) \
51127 + gr_handle_alertkill(current)
51128 +
51129 +enum {
51130 + FLOODING,
51131 + NO_FLOODING
51132 +};
51133 +
51134 +extern char *gr_alert_log_fmt;
51135 +extern char *gr_audit_log_fmt;
51136 +extern char *gr_alert_log_buf;
51137 +extern char *gr_audit_log_buf;
51138 +
51139 +static int gr_log_start(int audit)
51140 +{
51141 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
51142 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
51143 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51144 +
51145 + if (audit == GR_DO_AUDIT)
51146 + goto set_fmt;
51147 +
51148 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
51149 + grsec_alert_wtime = jiffies;
51150 + grsec_alert_fyet = 0;
51151 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
51152 + grsec_alert_fyet++;
51153 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
51154 + grsec_alert_wtime = jiffies;
51155 + grsec_alert_fyet++;
51156 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
51157 + return FLOODING;
51158 + } else return FLOODING;
51159 +
51160 +set_fmt:
51161 + memset(buf, 0, PAGE_SIZE);
51162 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
51163 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
51164 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51165 + } else if (current->signal->curr_ip) {
51166 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
51167 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
51168 + } else if (gr_acl_is_enabled()) {
51169 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
51170 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51171 + } else {
51172 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
51173 + strcpy(buf, fmt);
51174 + }
51175 +
51176 + return NO_FLOODING;
51177 +}
51178 +
51179 +static void gr_log_middle(int audit, const char *msg, va_list ap)
51180 + __attribute__ ((format (printf, 2, 0)));
51181 +
51182 +static void gr_log_middle(int audit, const char *msg, va_list ap)
51183 +{
51184 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51185 + unsigned int len = strlen(buf);
51186 +
51187 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51188 +
51189 + return;
51190 +}
51191 +
51192 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
51193 + __attribute__ ((format (printf, 2, 3)));
51194 +
51195 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
51196 +{
51197 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51198 + unsigned int len = strlen(buf);
51199 + va_list ap;
51200 +
51201 + va_start(ap, msg);
51202 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51203 + va_end(ap);
51204 +
51205 + return;
51206 +}
51207 +
51208 +static void gr_log_end(int audit)
51209 +{
51210 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51211 + unsigned int len = strlen(buf);
51212 +
51213 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
51214 + printk("%s\n", buf);
51215 +
51216 + return;
51217 +}
51218 +
51219 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
51220 +{
51221 + int logtype;
51222 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
51223 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
51224 + void *voidptr = NULL;
51225 + int num1 = 0, num2 = 0;
51226 + unsigned long ulong1 = 0, ulong2 = 0;
51227 + struct dentry *dentry = NULL;
51228 + struct vfsmount *mnt = NULL;
51229 + struct file *file = NULL;
51230 + struct task_struct *task = NULL;
51231 + const struct cred *cred, *pcred;
51232 + va_list ap;
51233 +
51234 + BEGIN_LOCKS(audit);
51235 + logtype = gr_log_start(audit);
51236 + if (logtype == FLOODING) {
51237 + END_LOCKS(audit);
51238 + return;
51239 + }
51240 + va_start(ap, argtypes);
51241 + switch (argtypes) {
51242 + case GR_TTYSNIFF:
51243 + task = va_arg(ap, struct task_struct *);
51244 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
51245 + break;
51246 + case GR_SYSCTL_HIDDEN:
51247 + str1 = va_arg(ap, char *);
51248 + gr_log_middle_varargs(audit, msg, result, str1);
51249 + break;
51250 + case GR_RBAC:
51251 + dentry = va_arg(ap, struct dentry *);
51252 + mnt = va_arg(ap, struct vfsmount *);
51253 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
51254 + break;
51255 + case GR_RBAC_STR:
51256 + dentry = va_arg(ap, struct dentry *);
51257 + mnt = va_arg(ap, struct vfsmount *);
51258 + str1 = va_arg(ap, char *);
51259 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
51260 + break;
51261 + case GR_STR_RBAC:
51262 + str1 = va_arg(ap, char *);
51263 + dentry = va_arg(ap, struct dentry *);
51264 + mnt = va_arg(ap, struct vfsmount *);
51265 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
51266 + break;
51267 + case GR_RBAC_MODE2:
51268 + dentry = va_arg(ap, struct dentry *);
51269 + mnt = va_arg(ap, struct vfsmount *);
51270 + str1 = va_arg(ap, char *);
51271 + str2 = va_arg(ap, char *);
51272 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
51273 + break;
51274 + case GR_RBAC_MODE3:
51275 + dentry = va_arg(ap, struct dentry *);
51276 + mnt = va_arg(ap, struct vfsmount *);
51277 + str1 = va_arg(ap, char *);
51278 + str2 = va_arg(ap, char *);
51279 + str3 = va_arg(ap, char *);
51280 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
51281 + break;
51282 + case GR_FILENAME:
51283 + dentry = va_arg(ap, struct dentry *);
51284 + mnt = va_arg(ap, struct vfsmount *);
51285 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
51286 + break;
51287 + case GR_STR_FILENAME:
51288 + str1 = va_arg(ap, char *);
51289 + dentry = va_arg(ap, struct dentry *);
51290 + mnt = va_arg(ap, struct vfsmount *);
51291 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
51292 + break;
51293 + case GR_FILENAME_STR:
51294 + dentry = va_arg(ap, struct dentry *);
51295 + mnt = va_arg(ap, struct vfsmount *);
51296 + str1 = va_arg(ap, char *);
51297 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
51298 + break;
51299 + case GR_FILENAME_TWO_INT:
51300 + dentry = va_arg(ap, struct dentry *);
51301 + mnt = va_arg(ap, struct vfsmount *);
51302 + num1 = va_arg(ap, int);
51303 + num2 = va_arg(ap, int);
51304 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
51305 + break;
51306 + case GR_FILENAME_TWO_INT_STR:
51307 + dentry = va_arg(ap, struct dentry *);
51308 + mnt = va_arg(ap, struct vfsmount *);
51309 + num1 = va_arg(ap, int);
51310 + num2 = va_arg(ap, int);
51311 + str1 = va_arg(ap, char *);
51312 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51313 + break;
51314 + case GR_TEXTREL:
51315 + file = va_arg(ap, struct file *);
51316 + ulong1 = va_arg(ap, unsigned long);
51317 + ulong2 = va_arg(ap, unsigned long);
51318 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51319 + break;
51320 + case GR_PTRACE:
51321 + task = va_arg(ap, struct task_struct *);
51322 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51323 + break;
51324 + case GR_RESOURCE:
51325 + task = va_arg(ap, struct task_struct *);
51326 + cred = __task_cred(task);
51327 + pcred = __task_cred(task->real_parent);
51328 + ulong1 = va_arg(ap, unsigned long);
51329 + str1 = va_arg(ap, char *);
51330 + ulong2 = va_arg(ap, unsigned long);
51331 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51332 + break;
51333 + case GR_CAP:
51334 + task = va_arg(ap, struct task_struct *);
51335 + cred = __task_cred(task);
51336 + pcred = __task_cred(task->real_parent);
51337 + str1 = va_arg(ap, char *);
51338 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51339 + break;
51340 + case GR_SIG:
51341 + str1 = va_arg(ap, char *);
51342 + voidptr = va_arg(ap, void *);
51343 + gr_log_middle_varargs(audit, msg, str1, voidptr);
51344 + break;
51345 + case GR_SIG2:
51346 + task = va_arg(ap, struct task_struct *);
51347 + cred = __task_cred(task);
51348 + pcred = __task_cred(task->real_parent);
51349 + num1 = va_arg(ap, int);
51350 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51351 + break;
51352 + case GR_CRASH1:
51353 + task = va_arg(ap, struct task_struct *);
51354 + cred = __task_cred(task);
51355 + pcred = __task_cred(task->real_parent);
51356 + ulong1 = va_arg(ap, unsigned long);
51357 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51358 + break;
51359 + case GR_CRASH2:
51360 + task = va_arg(ap, struct task_struct *);
51361 + cred = __task_cred(task);
51362 + pcred = __task_cred(task->real_parent);
51363 + ulong1 = va_arg(ap, unsigned long);
51364 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51365 + break;
51366 + case GR_RWXMAP:
51367 + file = va_arg(ap, struct file *);
51368 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51369 + break;
51370 + case GR_PSACCT:
51371 + {
51372 + unsigned int wday, cday;
51373 + __u8 whr, chr;
51374 + __u8 wmin, cmin;
51375 + __u8 wsec, csec;
51376 + char cur_tty[64] = { 0 };
51377 + char parent_tty[64] = { 0 };
51378 +
51379 + task = va_arg(ap, struct task_struct *);
51380 + wday = va_arg(ap, unsigned int);
51381 + cday = va_arg(ap, unsigned int);
51382 + whr = va_arg(ap, int);
51383 + chr = va_arg(ap, int);
51384 + wmin = va_arg(ap, int);
51385 + cmin = va_arg(ap, int);
51386 + wsec = va_arg(ap, int);
51387 + csec = va_arg(ap, int);
51388 + ulong1 = va_arg(ap, unsigned long);
51389 + cred = __task_cred(task);
51390 + pcred = __task_cred(task->real_parent);
51391 +
51392 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51393 + }
51394 + break;
51395 + default:
51396 + gr_log_middle(audit, msg, ap);
51397 + }
51398 + va_end(ap);
51399 + gr_log_end(audit);
51400 + END_LOCKS(audit);
51401 +}
51402 diff -urNp linux-3.0.4/grsecurity/grsec_mem.c linux-3.0.4/grsecurity/grsec_mem.c
51403 --- linux-3.0.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51404 +++ linux-3.0.4/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
51405 @@ -0,0 +1,33 @@
51406 +#include <linux/kernel.h>
51407 +#include <linux/sched.h>
51408 +#include <linux/mm.h>
51409 +#include <linux/mman.h>
51410 +#include <linux/grinternal.h>
51411 +
51412 +void
51413 +gr_handle_ioperm(void)
51414 +{
51415 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51416 + return;
51417 +}
51418 +
51419 +void
51420 +gr_handle_iopl(void)
51421 +{
51422 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51423 + return;
51424 +}
51425 +
51426 +void
51427 +gr_handle_mem_readwrite(u64 from, u64 to)
51428 +{
51429 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51430 + return;
51431 +}
51432 +
51433 +void
51434 +gr_handle_vm86(void)
51435 +{
51436 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51437 + return;
51438 +}
51439 diff -urNp linux-3.0.4/grsecurity/grsec_mount.c linux-3.0.4/grsecurity/grsec_mount.c
51440 --- linux-3.0.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
51441 +++ linux-3.0.4/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
51442 @@ -0,0 +1,62 @@
51443 +#include <linux/kernel.h>
51444 +#include <linux/sched.h>
51445 +#include <linux/mount.h>
51446 +#include <linux/grsecurity.h>
51447 +#include <linux/grinternal.h>
51448 +
51449 +void
51450 +gr_log_remount(const char *devname, const int retval)
51451 +{
51452 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51453 + if (grsec_enable_mount && (retval >= 0))
51454 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51455 +#endif
51456 + return;
51457 +}
51458 +
51459 +void
51460 +gr_log_unmount(const char *devname, const int retval)
51461 +{
51462 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51463 + if (grsec_enable_mount && (retval >= 0))
51464 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51465 +#endif
51466 + return;
51467 +}
51468 +
51469 +void
51470 +gr_log_mount(const char *from, const char *to, const int retval)
51471 +{
51472 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51473 + if (grsec_enable_mount && (retval >= 0))
51474 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
51475 +#endif
51476 + return;
51477 +}
51478 +
51479 +int
51480 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51481 +{
51482 +#ifdef CONFIG_GRKERNSEC_ROFS
51483 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51484 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51485 + return -EPERM;
51486 + } else
51487 + return 0;
51488 +#endif
51489 + return 0;
51490 +}
51491 +
51492 +int
51493 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51494 +{
51495 +#ifdef CONFIG_GRKERNSEC_ROFS
51496 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51497 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51498 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51499 + return -EPERM;
51500 + } else
51501 + return 0;
51502 +#endif
51503 + return 0;
51504 +}
51505 diff -urNp linux-3.0.4/grsecurity/grsec_pax.c linux-3.0.4/grsecurity/grsec_pax.c
51506 --- linux-3.0.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
51507 +++ linux-3.0.4/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
51508 @@ -0,0 +1,36 @@
51509 +#include <linux/kernel.h>
51510 +#include <linux/sched.h>
51511 +#include <linux/mm.h>
51512 +#include <linux/file.h>
51513 +#include <linux/grinternal.h>
51514 +#include <linux/grsecurity.h>
51515 +
51516 +void
51517 +gr_log_textrel(struct vm_area_struct * vma)
51518 +{
51519 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51520 + if (grsec_enable_audit_textrel)
51521 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
51522 +#endif
51523 + return;
51524 +}
51525 +
51526 +void
51527 +gr_log_rwxmmap(struct file *file)
51528 +{
51529 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51530 + if (grsec_enable_log_rwxmaps)
51531 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
51532 +#endif
51533 + return;
51534 +}
51535 +
51536 +void
51537 +gr_log_rwxmprotect(struct file *file)
51538 +{
51539 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51540 + if (grsec_enable_log_rwxmaps)
51541 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
51542 +#endif
51543 + return;
51544 +}
51545 diff -urNp linux-3.0.4/grsecurity/grsec_ptrace.c linux-3.0.4/grsecurity/grsec_ptrace.c
51546 --- linux-3.0.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
51547 +++ linux-3.0.4/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
51548 @@ -0,0 +1,14 @@
51549 +#include <linux/kernel.h>
51550 +#include <linux/sched.h>
51551 +#include <linux/grinternal.h>
51552 +#include <linux/grsecurity.h>
51553 +
51554 +void
51555 +gr_audit_ptrace(struct task_struct *task)
51556 +{
51557 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51558 + if (grsec_enable_audit_ptrace)
51559 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
51560 +#endif
51561 + return;
51562 +}
51563 diff -urNp linux-3.0.4/grsecurity/grsec_sig.c linux-3.0.4/grsecurity/grsec_sig.c
51564 --- linux-3.0.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
51565 +++ linux-3.0.4/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
51566 @@ -0,0 +1,206 @@
51567 +#include <linux/kernel.h>
51568 +#include <linux/sched.h>
51569 +#include <linux/delay.h>
51570 +#include <linux/grsecurity.h>
51571 +#include <linux/grinternal.h>
51572 +#include <linux/hardirq.h>
51573 +
51574 +char *signames[] = {
51575 + [SIGSEGV] = "Segmentation fault",
51576 + [SIGILL] = "Illegal instruction",
51577 + [SIGABRT] = "Abort",
51578 + [SIGBUS] = "Invalid alignment/Bus error"
51579 +};
51580 +
51581 +void
51582 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
51583 +{
51584 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51585 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
51586 + (sig == SIGABRT) || (sig == SIGBUS))) {
51587 + if (t->pid == current->pid) {
51588 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
51589 + } else {
51590 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
51591 + }
51592 + }
51593 +#endif
51594 + return;
51595 +}
51596 +
51597 +int
51598 +gr_handle_signal(const struct task_struct *p, const int sig)
51599 +{
51600 +#ifdef CONFIG_GRKERNSEC
51601 + if (current->pid > 1 && gr_check_protected_task(p)) {
51602 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
51603 + return -EPERM;
51604 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
51605 + return -EPERM;
51606 + }
51607 +#endif
51608 + return 0;
51609 +}
51610 +
51611 +#ifdef CONFIG_GRKERNSEC
51612 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
51613 +
51614 +int gr_fake_force_sig(int sig, struct task_struct *t)
51615 +{
51616 + unsigned long int flags;
51617 + int ret, blocked, ignored;
51618 + struct k_sigaction *action;
51619 +
51620 + spin_lock_irqsave(&t->sighand->siglock, flags);
51621 + action = &t->sighand->action[sig-1];
51622 + ignored = action->sa.sa_handler == SIG_IGN;
51623 + blocked = sigismember(&t->blocked, sig);
51624 + if (blocked || ignored) {
51625 + action->sa.sa_handler = SIG_DFL;
51626 + if (blocked) {
51627 + sigdelset(&t->blocked, sig);
51628 + recalc_sigpending_and_wake(t);
51629 + }
51630 + }
51631 + if (action->sa.sa_handler == SIG_DFL)
51632 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
51633 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
51634 +
51635 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
51636 +
51637 + return ret;
51638 +}
51639 +#endif
51640 +
51641 +#ifdef CONFIG_GRKERNSEC_BRUTE
51642 +#define GR_USER_BAN_TIME (15 * 60)
51643 +
51644 +static int __get_dumpable(unsigned long mm_flags)
51645 +{
51646 + int ret;
51647 +
51648 + ret = mm_flags & MMF_DUMPABLE_MASK;
51649 + return (ret >= 2) ? 2 : ret;
51650 +}
51651 +#endif
51652 +
51653 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
51654 +{
51655 +#ifdef CONFIG_GRKERNSEC_BRUTE
51656 + uid_t uid = 0;
51657 +
51658 + if (!grsec_enable_brute)
51659 + return;
51660 +
51661 + rcu_read_lock();
51662 + read_lock(&tasklist_lock);
51663 + read_lock(&grsec_exec_file_lock);
51664 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
51665 + p->real_parent->brute = 1;
51666 + else {
51667 + const struct cred *cred = __task_cred(p), *cred2;
51668 + struct task_struct *tsk, *tsk2;
51669 +
51670 + if (!__get_dumpable(mm_flags) && cred->uid) {
51671 + struct user_struct *user;
51672 +
51673 + uid = cred->uid;
51674 +
51675 + /* this is put upon execution past expiration */
51676 + user = find_user(uid);
51677 + if (user == NULL)
51678 + goto unlock;
51679 + user->banned = 1;
51680 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
51681 + if (user->ban_expires == ~0UL)
51682 + user->ban_expires--;
51683 +
51684 + do_each_thread(tsk2, tsk) {
51685 + cred2 = __task_cred(tsk);
51686 + if (tsk != p && cred2->uid == uid)
51687 + gr_fake_force_sig(SIGKILL, tsk);
51688 + } while_each_thread(tsk2, tsk);
51689 + }
51690 + }
51691 +unlock:
51692 + read_unlock(&grsec_exec_file_lock);
51693 + read_unlock(&tasklist_lock);
51694 + rcu_read_unlock();
51695 +
51696 + if (uid)
51697 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
51698 +
51699 +#endif
51700 + return;
51701 +}
51702 +
51703 +void gr_handle_brute_check(void)
51704 +{
51705 +#ifdef CONFIG_GRKERNSEC_BRUTE
51706 + if (current->brute)
51707 + msleep(30 * 1000);
51708 +#endif
51709 + return;
51710 +}
51711 +
51712 +void gr_handle_kernel_exploit(void)
51713 +{
51714 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
51715 + const struct cred *cred;
51716 + struct task_struct *tsk, *tsk2;
51717 + struct user_struct *user;
51718 + uid_t uid;
51719 +
51720 + if (in_irq() || in_serving_softirq() || in_nmi())
51721 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
51722 +
51723 + uid = current_uid();
51724 +
51725 + if (uid == 0)
51726 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
51727 + else {
51728 + /* kill all the processes of this user, hold a reference
51729 + to their creds struct, and prevent them from creating
51730 + another process until system reset
51731 + */
51732 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
51733 + /* we intentionally leak this ref */
51734 + user = get_uid(current->cred->user);
51735 + if (user) {
51736 + user->banned = 1;
51737 + user->ban_expires = ~0UL;
51738 + }
51739 +
51740 + read_lock(&tasklist_lock);
51741 + do_each_thread(tsk2, tsk) {
51742 + cred = __task_cred(tsk);
51743 + if (cred->uid == uid)
51744 + gr_fake_force_sig(SIGKILL, tsk);
51745 + } while_each_thread(tsk2, tsk);
51746 + read_unlock(&tasklist_lock);
51747 + }
51748 +#endif
51749 +}
51750 +
51751 +int __gr_process_user_ban(struct user_struct *user)
51752 +{
51753 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51754 + if (unlikely(user->banned)) {
51755 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
51756 + user->banned = 0;
51757 + user->ban_expires = 0;
51758 + free_uid(user);
51759 + } else
51760 + return -EPERM;
51761 + }
51762 +#endif
51763 + return 0;
51764 +}
51765 +
51766 +int gr_process_user_ban(void)
51767 +{
51768 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51769 + return __gr_process_user_ban(current->cred->user);
51770 +#endif
51771 + return 0;
51772 +}
51773 diff -urNp linux-3.0.4/grsecurity/grsec_sock.c linux-3.0.4/grsecurity/grsec_sock.c
51774 --- linux-3.0.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
51775 +++ linux-3.0.4/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
51776 @@ -0,0 +1,244 @@
51777 +#include <linux/kernel.h>
51778 +#include <linux/module.h>
51779 +#include <linux/sched.h>
51780 +#include <linux/file.h>
51781 +#include <linux/net.h>
51782 +#include <linux/in.h>
51783 +#include <linux/ip.h>
51784 +#include <net/sock.h>
51785 +#include <net/inet_sock.h>
51786 +#include <linux/grsecurity.h>
51787 +#include <linux/grinternal.h>
51788 +#include <linux/gracl.h>
51789 +
51790 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
51791 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
51792 +
51793 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
51794 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
51795 +
51796 +#ifdef CONFIG_UNIX_MODULE
51797 +EXPORT_SYMBOL(gr_acl_handle_unix);
51798 +EXPORT_SYMBOL(gr_acl_handle_mknod);
51799 +EXPORT_SYMBOL(gr_handle_chroot_unix);
51800 +EXPORT_SYMBOL(gr_handle_create);
51801 +#endif
51802 +
51803 +#ifdef CONFIG_GRKERNSEC
51804 +#define gr_conn_table_size 32749
51805 +struct conn_table_entry {
51806 + struct conn_table_entry *next;
51807 + struct signal_struct *sig;
51808 +};
51809 +
51810 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
51811 +DEFINE_SPINLOCK(gr_conn_table_lock);
51812 +
51813 +extern const char * gr_socktype_to_name(unsigned char type);
51814 +extern const char * gr_proto_to_name(unsigned char proto);
51815 +extern const char * gr_sockfamily_to_name(unsigned char family);
51816 +
51817 +static __inline__ int
51818 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
51819 +{
51820 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
51821 +}
51822 +
51823 +static __inline__ int
51824 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
51825 + __u16 sport, __u16 dport)
51826 +{
51827 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
51828 + sig->gr_sport == sport && sig->gr_dport == dport))
51829 + return 1;
51830 + else
51831 + return 0;
51832 +}
51833 +
51834 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
51835 +{
51836 + struct conn_table_entry **match;
51837 + unsigned int index;
51838 +
51839 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51840 + sig->gr_sport, sig->gr_dport,
51841 + gr_conn_table_size);
51842 +
51843 + newent->sig = sig;
51844 +
51845 + match = &gr_conn_table[index];
51846 + newent->next = *match;
51847 + *match = newent;
51848 +
51849 + return;
51850 +}
51851 +
51852 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
51853 +{
51854 + struct conn_table_entry *match, *last = NULL;
51855 + unsigned int index;
51856 +
51857 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51858 + sig->gr_sport, sig->gr_dport,
51859 + gr_conn_table_size);
51860 +
51861 + match = gr_conn_table[index];
51862 + while (match && !conn_match(match->sig,
51863 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
51864 + sig->gr_dport)) {
51865 + last = match;
51866 + match = match->next;
51867 + }
51868 +
51869 + if (match) {
51870 + if (last)
51871 + last->next = match->next;
51872 + else
51873 + gr_conn_table[index] = NULL;
51874 + kfree(match);
51875 + }
51876 +
51877 + return;
51878 +}
51879 +
51880 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
51881 + __u16 sport, __u16 dport)
51882 +{
51883 + struct conn_table_entry *match;
51884 + unsigned int index;
51885 +
51886 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
51887 +
51888 + match = gr_conn_table[index];
51889 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
51890 + match = match->next;
51891 +
51892 + if (match)
51893 + return match->sig;
51894 + else
51895 + return NULL;
51896 +}
51897 +
51898 +#endif
51899 +
51900 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
51901 +{
51902 +#ifdef CONFIG_GRKERNSEC
51903 + struct signal_struct *sig = task->signal;
51904 + struct conn_table_entry *newent;
51905 +
51906 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
51907 + if (newent == NULL)
51908 + return;
51909 + /* no bh lock needed since we are called with bh disabled */
51910 + spin_lock(&gr_conn_table_lock);
51911 + gr_del_task_from_ip_table_nolock(sig);
51912 + sig->gr_saddr = inet->inet_rcv_saddr;
51913 + sig->gr_daddr = inet->inet_daddr;
51914 + sig->gr_sport = inet->inet_sport;
51915 + sig->gr_dport = inet->inet_dport;
51916 + gr_add_to_task_ip_table_nolock(sig, newent);
51917 + spin_unlock(&gr_conn_table_lock);
51918 +#endif
51919 + return;
51920 +}
51921 +
51922 +void gr_del_task_from_ip_table(struct task_struct *task)
51923 +{
51924 +#ifdef CONFIG_GRKERNSEC
51925 + spin_lock_bh(&gr_conn_table_lock);
51926 + gr_del_task_from_ip_table_nolock(task->signal);
51927 + spin_unlock_bh(&gr_conn_table_lock);
51928 +#endif
51929 + return;
51930 +}
51931 +
51932 +void
51933 +gr_attach_curr_ip(const struct sock *sk)
51934 +{
51935 +#ifdef CONFIG_GRKERNSEC
51936 + struct signal_struct *p, *set;
51937 + const struct inet_sock *inet = inet_sk(sk);
51938 +
51939 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
51940 + return;
51941 +
51942 + set = current->signal;
51943 +
51944 + spin_lock_bh(&gr_conn_table_lock);
51945 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
51946 + inet->inet_dport, inet->inet_sport);
51947 + if (unlikely(p != NULL)) {
51948 + set->curr_ip = p->curr_ip;
51949 + set->used_accept = 1;
51950 + gr_del_task_from_ip_table_nolock(p);
51951 + spin_unlock_bh(&gr_conn_table_lock);
51952 + return;
51953 + }
51954 + spin_unlock_bh(&gr_conn_table_lock);
51955 +
51956 + set->curr_ip = inet->inet_daddr;
51957 + set->used_accept = 1;
51958 +#endif
51959 + return;
51960 +}
51961 +
51962 +int
51963 +gr_handle_sock_all(const int family, const int type, const int protocol)
51964 +{
51965 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51966 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
51967 + (family != AF_UNIX)) {
51968 + if (family == AF_INET)
51969 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
51970 + else
51971 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
51972 + return -EACCES;
51973 + }
51974 +#endif
51975 + return 0;
51976 +}
51977 +
51978 +int
51979 +gr_handle_sock_server(const struct sockaddr *sck)
51980 +{
51981 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51982 + if (grsec_enable_socket_server &&
51983 + in_group_p(grsec_socket_server_gid) &&
51984 + sck && (sck->sa_family != AF_UNIX) &&
51985 + (sck->sa_family != AF_LOCAL)) {
51986 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51987 + return -EACCES;
51988 + }
51989 +#endif
51990 + return 0;
51991 +}
51992 +
51993 +int
51994 +gr_handle_sock_server_other(const struct sock *sck)
51995 +{
51996 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51997 + if (grsec_enable_socket_server &&
51998 + in_group_p(grsec_socket_server_gid) &&
51999 + sck && (sck->sk_family != AF_UNIX) &&
52000 + (sck->sk_family != AF_LOCAL)) {
52001 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52002 + return -EACCES;
52003 + }
52004 +#endif
52005 + return 0;
52006 +}
52007 +
52008 +int
52009 +gr_handle_sock_client(const struct sockaddr *sck)
52010 +{
52011 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52012 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
52013 + sck && (sck->sa_family != AF_UNIX) &&
52014 + (sck->sa_family != AF_LOCAL)) {
52015 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
52016 + return -EACCES;
52017 + }
52018 +#endif
52019 + return 0;
52020 +}
52021 diff -urNp linux-3.0.4/grsecurity/grsec_sysctl.c linux-3.0.4/grsecurity/grsec_sysctl.c
52022 --- linux-3.0.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
52023 +++ linux-3.0.4/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
52024 @@ -0,0 +1,433 @@
52025 +#include <linux/kernel.h>
52026 +#include <linux/sched.h>
52027 +#include <linux/sysctl.h>
52028 +#include <linux/grsecurity.h>
52029 +#include <linux/grinternal.h>
52030 +
52031 +int
52032 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
52033 +{
52034 +#ifdef CONFIG_GRKERNSEC_SYSCTL
52035 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
52036 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
52037 + return -EACCES;
52038 + }
52039 +#endif
52040 + return 0;
52041 +}
52042 +
52043 +#ifdef CONFIG_GRKERNSEC_ROFS
52044 +static int __maybe_unused one = 1;
52045 +#endif
52046 +
52047 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
52048 +struct ctl_table grsecurity_table[] = {
52049 +#ifdef CONFIG_GRKERNSEC_SYSCTL
52050 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
52051 +#ifdef CONFIG_GRKERNSEC_IO
52052 + {
52053 + .procname = "disable_priv_io",
52054 + .data = &grsec_disable_privio,
52055 + .maxlen = sizeof(int),
52056 + .mode = 0600,
52057 + .proc_handler = &proc_dointvec,
52058 + },
52059 +#endif
52060 +#endif
52061 +#ifdef CONFIG_GRKERNSEC_LINK
52062 + {
52063 + .procname = "linking_restrictions",
52064 + .data = &grsec_enable_link,
52065 + .maxlen = sizeof(int),
52066 + .mode = 0600,
52067 + .proc_handler = &proc_dointvec,
52068 + },
52069 +#endif
52070 +#ifdef CONFIG_GRKERNSEC_BRUTE
52071 + {
52072 + .procname = "deter_bruteforce",
52073 + .data = &grsec_enable_brute,
52074 + .maxlen = sizeof(int),
52075 + .mode = 0600,
52076 + .proc_handler = &proc_dointvec,
52077 + },
52078 +#endif
52079 +#ifdef CONFIG_GRKERNSEC_FIFO
52080 + {
52081 + .procname = "fifo_restrictions",
52082 + .data = &grsec_enable_fifo,
52083 + .maxlen = sizeof(int),
52084 + .mode = 0600,
52085 + .proc_handler = &proc_dointvec,
52086 + },
52087 +#endif
52088 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52089 + {
52090 + .procname = "ip_blackhole",
52091 + .data = &grsec_enable_blackhole,
52092 + .maxlen = sizeof(int),
52093 + .mode = 0600,
52094 + .proc_handler = &proc_dointvec,
52095 + },
52096 + {
52097 + .procname = "lastack_retries",
52098 + .data = &grsec_lastack_retries,
52099 + .maxlen = sizeof(int),
52100 + .mode = 0600,
52101 + .proc_handler = &proc_dointvec,
52102 + },
52103 +#endif
52104 +#ifdef CONFIG_GRKERNSEC_EXECLOG
52105 + {
52106 + .procname = "exec_logging",
52107 + .data = &grsec_enable_execlog,
52108 + .maxlen = sizeof(int),
52109 + .mode = 0600,
52110 + .proc_handler = &proc_dointvec,
52111 + },
52112 +#endif
52113 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52114 + {
52115 + .procname = "rwxmap_logging",
52116 + .data = &grsec_enable_log_rwxmaps,
52117 + .maxlen = sizeof(int),
52118 + .mode = 0600,
52119 + .proc_handler = &proc_dointvec,
52120 + },
52121 +#endif
52122 +#ifdef CONFIG_GRKERNSEC_SIGNAL
52123 + {
52124 + .procname = "signal_logging",
52125 + .data = &grsec_enable_signal,
52126 + .maxlen = sizeof(int),
52127 + .mode = 0600,
52128 + .proc_handler = &proc_dointvec,
52129 + },
52130 +#endif
52131 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
52132 + {
52133 + .procname = "forkfail_logging",
52134 + .data = &grsec_enable_forkfail,
52135 + .maxlen = sizeof(int),
52136 + .mode = 0600,
52137 + .proc_handler = &proc_dointvec,
52138 + },
52139 +#endif
52140 +#ifdef CONFIG_GRKERNSEC_TIME
52141 + {
52142 + .procname = "timechange_logging",
52143 + .data = &grsec_enable_time,
52144 + .maxlen = sizeof(int),
52145 + .mode = 0600,
52146 + .proc_handler = &proc_dointvec,
52147 + },
52148 +#endif
52149 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52150 + {
52151 + .procname = "chroot_deny_shmat",
52152 + .data = &grsec_enable_chroot_shmat,
52153 + .maxlen = sizeof(int),
52154 + .mode = 0600,
52155 + .proc_handler = &proc_dointvec,
52156 + },
52157 +#endif
52158 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52159 + {
52160 + .procname = "chroot_deny_unix",
52161 + .data = &grsec_enable_chroot_unix,
52162 + .maxlen = sizeof(int),
52163 + .mode = 0600,
52164 + .proc_handler = &proc_dointvec,
52165 + },
52166 +#endif
52167 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52168 + {
52169 + .procname = "chroot_deny_mount",
52170 + .data = &grsec_enable_chroot_mount,
52171 + .maxlen = sizeof(int),
52172 + .mode = 0600,
52173 + .proc_handler = &proc_dointvec,
52174 + },
52175 +#endif
52176 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52177 + {
52178 + .procname = "chroot_deny_fchdir",
52179 + .data = &grsec_enable_chroot_fchdir,
52180 + .maxlen = sizeof(int),
52181 + .mode = 0600,
52182 + .proc_handler = &proc_dointvec,
52183 + },
52184 +#endif
52185 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52186 + {
52187 + .procname = "chroot_deny_chroot",
52188 + .data = &grsec_enable_chroot_double,
52189 + .maxlen = sizeof(int),
52190 + .mode = 0600,
52191 + .proc_handler = &proc_dointvec,
52192 + },
52193 +#endif
52194 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52195 + {
52196 + .procname = "chroot_deny_pivot",
52197 + .data = &grsec_enable_chroot_pivot,
52198 + .maxlen = sizeof(int),
52199 + .mode = 0600,
52200 + .proc_handler = &proc_dointvec,
52201 + },
52202 +#endif
52203 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52204 + {
52205 + .procname = "chroot_enforce_chdir",
52206 + .data = &grsec_enable_chroot_chdir,
52207 + .maxlen = sizeof(int),
52208 + .mode = 0600,
52209 + .proc_handler = &proc_dointvec,
52210 + },
52211 +#endif
52212 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52213 + {
52214 + .procname = "chroot_deny_chmod",
52215 + .data = &grsec_enable_chroot_chmod,
52216 + .maxlen = sizeof(int),
52217 + .mode = 0600,
52218 + .proc_handler = &proc_dointvec,
52219 + },
52220 +#endif
52221 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52222 + {
52223 + .procname = "chroot_deny_mknod",
52224 + .data = &grsec_enable_chroot_mknod,
52225 + .maxlen = sizeof(int),
52226 + .mode = 0600,
52227 + .proc_handler = &proc_dointvec,
52228 + },
52229 +#endif
52230 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52231 + {
52232 + .procname = "chroot_restrict_nice",
52233 + .data = &grsec_enable_chroot_nice,
52234 + .maxlen = sizeof(int),
52235 + .mode = 0600,
52236 + .proc_handler = &proc_dointvec,
52237 + },
52238 +#endif
52239 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52240 + {
52241 + .procname = "chroot_execlog",
52242 + .data = &grsec_enable_chroot_execlog,
52243 + .maxlen = sizeof(int),
52244 + .mode = 0600,
52245 + .proc_handler = &proc_dointvec,
52246 + },
52247 +#endif
52248 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52249 + {
52250 + .procname = "chroot_caps",
52251 + .data = &grsec_enable_chroot_caps,
52252 + .maxlen = sizeof(int),
52253 + .mode = 0600,
52254 + .proc_handler = &proc_dointvec,
52255 + },
52256 +#endif
52257 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52258 + {
52259 + .procname = "chroot_deny_sysctl",
52260 + .data = &grsec_enable_chroot_sysctl,
52261 + .maxlen = sizeof(int),
52262 + .mode = 0600,
52263 + .proc_handler = &proc_dointvec,
52264 + },
52265 +#endif
52266 +#ifdef CONFIG_GRKERNSEC_TPE
52267 + {
52268 + .procname = "tpe",
52269 + .data = &grsec_enable_tpe,
52270 + .maxlen = sizeof(int),
52271 + .mode = 0600,
52272 + .proc_handler = &proc_dointvec,
52273 + },
52274 + {
52275 + .procname = "tpe_gid",
52276 + .data = &grsec_tpe_gid,
52277 + .maxlen = sizeof(int),
52278 + .mode = 0600,
52279 + .proc_handler = &proc_dointvec,
52280 + },
52281 +#endif
52282 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52283 + {
52284 + .procname = "tpe_invert",
52285 + .data = &grsec_enable_tpe_invert,
52286 + .maxlen = sizeof(int),
52287 + .mode = 0600,
52288 + .proc_handler = &proc_dointvec,
52289 + },
52290 +#endif
52291 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52292 + {
52293 + .procname = "tpe_restrict_all",
52294 + .data = &grsec_enable_tpe_all,
52295 + .maxlen = sizeof(int),
52296 + .mode = 0600,
52297 + .proc_handler = &proc_dointvec,
52298 + },
52299 +#endif
52300 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52301 + {
52302 + .procname = "socket_all",
52303 + .data = &grsec_enable_socket_all,
52304 + .maxlen = sizeof(int),
52305 + .mode = 0600,
52306 + .proc_handler = &proc_dointvec,
52307 + },
52308 + {
52309 + .procname = "socket_all_gid",
52310 + .data = &grsec_socket_all_gid,
52311 + .maxlen = sizeof(int),
52312 + .mode = 0600,
52313 + .proc_handler = &proc_dointvec,
52314 + },
52315 +#endif
52316 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52317 + {
52318 + .procname = "socket_client",
52319 + .data = &grsec_enable_socket_client,
52320 + .maxlen = sizeof(int),
52321 + .mode = 0600,
52322 + .proc_handler = &proc_dointvec,
52323 + },
52324 + {
52325 + .procname = "socket_client_gid",
52326 + .data = &grsec_socket_client_gid,
52327 + .maxlen = sizeof(int),
52328 + .mode = 0600,
52329 + .proc_handler = &proc_dointvec,
52330 + },
52331 +#endif
52332 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52333 + {
52334 + .procname = "socket_server",
52335 + .data = &grsec_enable_socket_server,
52336 + .maxlen = sizeof(int),
52337 + .mode = 0600,
52338 + .proc_handler = &proc_dointvec,
52339 + },
52340 + {
52341 + .procname = "socket_server_gid",
52342 + .data = &grsec_socket_server_gid,
52343 + .maxlen = sizeof(int),
52344 + .mode = 0600,
52345 + .proc_handler = &proc_dointvec,
52346 + },
52347 +#endif
52348 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52349 + {
52350 + .procname = "audit_group",
52351 + .data = &grsec_enable_group,
52352 + .maxlen = sizeof(int),
52353 + .mode = 0600,
52354 + .proc_handler = &proc_dointvec,
52355 + },
52356 + {
52357 + .procname = "audit_gid",
52358 + .data = &grsec_audit_gid,
52359 + .maxlen = sizeof(int),
52360 + .mode = 0600,
52361 + .proc_handler = &proc_dointvec,
52362 + },
52363 +#endif
52364 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52365 + {
52366 + .procname = "audit_chdir",
52367 + .data = &grsec_enable_chdir,
52368 + .maxlen = sizeof(int),
52369 + .mode = 0600,
52370 + .proc_handler = &proc_dointvec,
52371 + },
52372 +#endif
52373 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52374 + {
52375 + .procname = "audit_mount",
52376 + .data = &grsec_enable_mount,
52377 + .maxlen = sizeof(int),
52378 + .mode = 0600,
52379 + .proc_handler = &proc_dointvec,
52380 + },
52381 +#endif
52382 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52383 + {
52384 + .procname = "audit_textrel",
52385 + .data = &grsec_enable_audit_textrel,
52386 + .maxlen = sizeof(int),
52387 + .mode = 0600,
52388 + .proc_handler = &proc_dointvec,
52389 + },
52390 +#endif
52391 +#ifdef CONFIG_GRKERNSEC_DMESG
52392 + {
52393 + .procname = "dmesg",
52394 + .data = &grsec_enable_dmesg,
52395 + .maxlen = sizeof(int),
52396 + .mode = 0600,
52397 + .proc_handler = &proc_dointvec,
52398 + },
52399 +#endif
52400 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52401 + {
52402 + .procname = "chroot_findtask",
52403 + .data = &grsec_enable_chroot_findtask,
52404 + .maxlen = sizeof(int),
52405 + .mode = 0600,
52406 + .proc_handler = &proc_dointvec,
52407 + },
52408 +#endif
52409 +#ifdef CONFIG_GRKERNSEC_RESLOG
52410 + {
52411 + .procname = "resource_logging",
52412 + .data = &grsec_resource_logging,
52413 + .maxlen = sizeof(int),
52414 + .mode = 0600,
52415 + .proc_handler = &proc_dointvec,
52416 + },
52417 +#endif
52418 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52419 + {
52420 + .procname = "audit_ptrace",
52421 + .data = &grsec_enable_audit_ptrace,
52422 + .maxlen = sizeof(int),
52423 + .mode = 0600,
52424 + .proc_handler = &proc_dointvec,
52425 + },
52426 +#endif
52427 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52428 + {
52429 + .procname = "harden_ptrace",
52430 + .data = &grsec_enable_harden_ptrace,
52431 + .maxlen = sizeof(int),
52432 + .mode = 0600,
52433 + .proc_handler = &proc_dointvec,
52434 + },
52435 +#endif
52436 + {
52437 + .procname = "grsec_lock",
52438 + .data = &grsec_lock,
52439 + .maxlen = sizeof(int),
52440 + .mode = 0600,
52441 + .proc_handler = &proc_dointvec,
52442 + },
52443 +#endif
52444 +#ifdef CONFIG_GRKERNSEC_ROFS
52445 + {
52446 + .procname = "romount_protect",
52447 + .data = &grsec_enable_rofs,
52448 + .maxlen = sizeof(int),
52449 + .mode = 0600,
52450 + .proc_handler = &proc_dointvec_minmax,
52451 + .extra1 = &one,
52452 + .extra2 = &one,
52453 + },
52454 +#endif
52455 + { }
52456 +};
52457 +#endif
52458 diff -urNp linux-3.0.4/grsecurity/grsec_time.c linux-3.0.4/grsecurity/grsec_time.c
52459 --- linux-3.0.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
52460 +++ linux-3.0.4/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
52461 @@ -0,0 +1,16 @@
52462 +#include <linux/kernel.h>
52463 +#include <linux/sched.h>
52464 +#include <linux/grinternal.h>
52465 +#include <linux/module.h>
52466 +
52467 +void
52468 +gr_log_timechange(void)
52469 +{
52470 +#ifdef CONFIG_GRKERNSEC_TIME
52471 + if (grsec_enable_time)
52472 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
52473 +#endif
52474 + return;
52475 +}
52476 +
52477 +EXPORT_SYMBOL(gr_log_timechange);
52478 diff -urNp linux-3.0.4/grsecurity/grsec_tpe.c linux-3.0.4/grsecurity/grsec_tpe.c
52479 --- linux-3.0.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
52480 +++ linux-3.0.4/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
52481 @@ -0,0 +1,39 @@
52482 +#include <linux/kernel.h>
52483 +#include <linux/sched.h>
52484 +#include <linux/file.h>
52485 +#include <linux/fs.h>
52486 +#include <linux/grinternal.h>
52487 +
52488 +extern int gr_acl_tpe_check(void);
52489 +
52490 +int
52491 +gr_tpe_allow(const struct file *file)
52492 +{
52493 +#ifdef CONFIG_GRKERNSEC
52494 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
52495 + const struct cred *cred = current_cred();
52496 +
52497 + if (cred->uid && ((grsec_enable_tpe &&
52498 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52499 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
52500 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
52501 +#else
52502 + in_group_p(grsec_tpe_gid)
52503 +#endif
52504 + ) || gr_acl_tpe_check()) &&
52505 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
52506 + (inode->i_mode & S_IWOTH))))) {
52507 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52508 + return 0;
52509 + }
52510 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52511 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
52512 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
52513 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
52514 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52515 + return 0;
52516 + }
52517 +#endif
52518 +#endif
52519 + return 1;
52520 +}
52521 diff -urNp linux-3.0.4/grsecurity/grsum.c linux-3.0.4/grsecurity/grsum.c
52522 --- linux-3.0.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
52523 +++ linux-3.0.4/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
52524 @@ -0,0 +1,61 @@
52525 +#include <linux/err.h>
52526 +#include <linux/kernel.h>
52527 +#include <linux/sched.h>
52528 +#include <linux/mm.h>
52529 +#include <linux/scatterlist.h>
52530 +#include <linux/crypto.h>
52531 +#include <linux/gracl.h>
52532 +
52533 +
52534 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
52535 +#error "crypto and sha256 must be built into the kernel"
52536 +#endif
52537 +
52538 +int
52539 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
52540 +{
52541 + char *p;
52542 + struct crypto_hash *tfm;
52543 + struct hash_desc desc;
52544 + struct scatterlist sg;
52545 + unsigned char temp_sum[GR_SHA_LEN];
52546 + volatile int retval = 0;
52547 + volatile int dummy = 0;
52548 + unsigned int i;
52549 +
52550 + sg_init_table(&sg, 1);
52551 +
52552 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
52553 + if (IS_ERR(tfm)) {
52554 + /* should never happen, since sha256 should be built in */
52555 + return 1;
52556 + }
52557 +
52558 + desc.tfm = tfm;
52559 + desc.flags = 0;
52560 +
52561 + crypto_hash_init(&desc);
52562 +
52563 + p = salt;
52564 + sg_set_buf(&sg, p, GR_SALT_LEN);
52565 + crypto_hash_update(&desc, &sg, sg.length);
52566 +
52567 + p = entry->pw;
52568 + sg_set_buf(&sg, p, strlen(p));
52569 +
52570 + crypto_hash_update(&desc, &sg, sg.length);
52571 +
52572 + crypto_hash_final(&desc, temp_sum);
52573 +
52574 + memset(entry->pw, 0, GR_PW_LEN);
52575 +
52576 + for (i = 0; i < GR_SHA_LEN; i++)
52577 + if (sum[i] != temp_sum[i])
52578 + retval = 1;
52579 + else
52580 + dummy = 1; // waste a cycle
52581 +
52582 + crypto_free_hash(tfm);
52583 +
52584 + return retval;
52585 +}
52586 diff -urNp linux-3.0.4/grsecurity/Kconfig linux-3.0.4/grsecurity/Kconfig
52587 --- linux-3.0.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
52588 +++ linux-3.0.4/grsecurity/Kconfig 2011-08-25 17:25:34.000000000 -0400
52589 @@ -0,0 +1,1038 @@
52590 +#
52591 +# grecurity configuration
52592 +#
52593 +
52594 +menu "Grsecurity"
52595 +
52596 +config GRKERNSEC
52597 + bool "Grsecurity"
52598 + select CRYPTO
52599 + select CRYPTO_SHA256
52600 + help
52601 + If you say Y here, you will be able to configure many features
52602 + that will enhance the security of your system. It is highly
52603 + recommended that you say Y here and read through the help
52604 + for each option so that you fully understand the features and
52605 + can evaluate their usefulness for your machine.
52606 +
52607 +choice
52608 + prompt "Security Level"
52609 + depends on GRKERNSEC
52610 + default GRKERNSEC_CUSTOM
52611 +
52612 +config GRKERNSEC_LOW
52613 + bool "Low"
52614 + select GRKERNSEC_LINK
52615 + select GRKERNSEC_FIFO
52616 + select GRKERNSEC_RANDNET
52617 + select GRKERNSEC_DMESG
52618 + select GRKERNSEC_CHROOT
52619 + select GRKERNSEC_CHROOT_CHDIR
52620 +
52621 + help
52622 + If you choose this option, several of the grsecurity options will
52623 + be enabled that will give you greater protection against a number
52624 + of attacks, while assuring that none of your software will have any
52625 + conflicts with the additional security measures. If you run a lot
52626 + of unusual software, or you are having problems with the higher
52627 + security levels, you should say Y here. With this option, the
52628 + following features are enabled:
52629 +
52630 + - Linking restrictions
52631 + - FIFO restrictions
52632 + - Restricted dmesg
52633 + - Enforced chdir("/") on chroot
52634 + - Runtime module disabling
52635 +
52636 +config GRKERNSEC_MEDIUM
52637 + bool "Medium"
52638 + select PAX
52639 + select PAX_EI_PAX
52640 + select PAX_PT_PAX_FLAGS
52641 + select PAX_HAVE_ACL_FLAGS
52642 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52643 + select GRKERNSEC_CHROOT
52644 + select GRKERNSEC_CHROOT_SYSCTL
52645 + select GRKERNSEC_LINK
52646 + select GRKERNSEC_FIFO
52647 + select GRKERNSEC_DMESG
52648 + select GRKERNSEC_RANDNET
52649 + select GRKERNSEC_FORKFAIL
52650 + select GRKERNSEC_TIME
52651 + select GRKERNSEC_SIGNAL
52652 + select GRKERNSEC_CHROOT
52653 + select GRKERNSEC_CHROOT_UNIX
52654 + select GRKERNSEC_CHROOT_MOUNT
52655 + select GRKERNSEC_CHROOT_PIVOT
52656 + select GRKERNSEC_CHROOT_DOUBLE
52657 + select GRKERNSEC_CHROOT_CHDIR
52658 + select GRKERNSEC_CHROOT_MKNOD
52659 + select GRKERNSEC_PROC
52660 + select GRKERNSEC_PROC_USERGROUP
52661 + select PAX_RANDUSTACK
52662 + select PAX_ASLR
52663 + select PAX_RANDMMAP
52664 + select PAX_REFCOUNT if (X86 || SPARC64)
52665 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
52666 +
52667 + help
52668 + If you say Y here, several features in addition to those included
52669 + in the low additional security level will be enabled. These
52670 + features provide even more security to your system, though in rare
52671 + cases they may be incompatible with very old or poorly written
52672 + software. If you enable this option, make sure that your auth
52673 + service (identd) is running as gid 1001. With this option,
52674 + the following features (in addition to those provided in the
52675 + low additional security level) will be enabled:
52676 +
52677 + - Failed fork logging
52678 + - Time change logging
52679 + - Signal logging
52680 + - Deny mounts in chroot
52681 + - Deny double chrooting
52682 + - Deny sysctl writes in chroot
52683 + - Deny mknod in chroot
52684 + - Deny access to abstract AF_UNIX sockets out of chroot
52685 + - Deny pivot_root in chroot
52686 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
52687 + - /proc restrictions with special GID set to 10 (usually wheel)
52688 + - Address Space Layout Randomization (ASLR)
52689 + - Prevent exploitation of most refcount overflows
52690 + - Bounds checking of copying between the kernel and userland
52691 +
52692 +config GRKERNSEC_HIGH
52693 + bool "High"
52694 + select GRKERNSEC_LINK
52695 + select GRKERNSEC_FIFO
52696 + select GRKERNSEC_DMESG
52697 + select GRKERNSEC_FORKFAIL
52698 + select GRKERNSEC_TIME
52699 + select GRKERNSEC_SIGNAL
52700 + select GRKERNSEC_CHROOT
52701 + select GRKERNSEC_CHROOT_SHMAT
52702 + select GRKERNSEC_CHROOT_UNIX
52703 + select GRKERNSEC_CHROOT_MOUNT
52704 + select GRKERNSEC_CHROOT_FCHDIR
52705 + select GRKERNSEC_CHROOT_PIVOT
52706 + select GRKERNSEC_CHROOT_DOUBLE
52707 + select GRKERNSEC_CHROOT_CHDIR
52708 + select GRKERNSEC_CHROOT_MKNOD
52709 + select GRKERNSEC_CHROOT_CAPS
52710 + select GRKERNSEC_CHROOT_SYSCTL
52711 + select GRKERNSEC_CHROOT_FINDTASK
52712 + select GRKERNSEC_SYSFS_RESTRICT
52713 + select GRKERNSEC_PROC
52714 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52715 + select GRKERNSEC_HIDESYM
52716 + select GRKERNSEC_BRUTE
52717 + select GRKERNSEC_PROC_USERGROUP
52718 + select GRKERNSEC_KMEM
52719 + select GRKERNSEC_RESLOG
52720 + select GRKERNSEC_RANDNET
52721 + select GRKERNSEC_PROC_ADD
52722 + select GRKERNSEC_CHROOT_CHMOD
52723 + select GRKERNSEC_CHROOT_NICE
52724 + select GRKERNSEC_AUDIT_MOUNT
52725 + select GRKERNSEC_MODHARDEN if (MODULES)
52726 + select GRKERNSEC_HARDEN_PTRACE
52727 + select GRKERNSEC_VM86 if (X86_32)
52728 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
52729 + select PAX
52730 + select PAX_RANDUSTACK
52731 + select PAX_ASLR
52732 + select PAX_RANDMMAP
52733 + select PAX_NOEXEC
52734 + select PAX_MPROTECT
52735 + select PAX_EI_PAX
52736 + select PAX_PT_PAX_FLAGS
52737 + select PAX_HAVE_ACL_FLAGS
52738 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
52739 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
52740 + select PAX_RANDKSTACK if (X86_TSC && X86)
52741 + select PAX_SEGMEXEC if (X86_32)
52742 + select PAX_PAGEEXEC
52743 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
52744 + select PAX_EMUTRAMP if (PARISC)
52745 + select PAX_EMUSIGRT if (PARISC)
52746 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
52747 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
52748 + select PAX_REFCOUNT if (X86 || SPARC64)
52749 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
52750 + help
52751 + If you say Y here, many of the features of grsecurity will be
52752 + enabled, which will protect you against many kinds of attacks
52753 + against your system. The heightened security comes at a cost
52754 + of an increased chance of incompatibilities with rare software
52755 + on your machine. Since this security level enables PaX, you should
52756 + view <http://pax.grsecurity.net> and read about the PaX
52757 + project. While you are there, download chpax and run it on
52758 + binaries that cause problems with PaX. Also remember that
52759 + since the /proc restrictions are enabled, you must run your
52760 + identd as gid 1001. This security level enables the following
52761 + features in addition to those listed in the low and medium
52762 + security levels:
52763 +
52764 + - Additional /proc restrictions
52765 + - Chmod restrictions in chroot
52766 + - No signals, ptrace, or viewing of processes outside of chroot
52767 + - Capability restrictions in chroot
52768 + - Deny fchdir out of chroot
52769 + - Priority restrictions in chroot
52770 + - Segmentation-based implementation of PaX
52771 + - Mprotect restrictions
52772 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
52773 + - Kernel stack randomization
52774 + - Mount/unmount/remount logging
52775 + - Kernel symbol hiding
52776 + - Prevention of memory exhaustion-based exploits
52777 + - Hardening of module auto-loading
52778 + - Ptrace restrictions
52779 + - Restricted vm86 mode
52780 + - Restricted sysfs/debugfs
52781 + - Active kernel exploit response
52782 +
52783 +config GRKERNSEC_CUSTOM
52784 + bool "Custom"
52785 + help
52786 + If you say Y here, you will be able to configure every grsecurity
52787 + option, which allows you to enable many more features that aren't
52788 + covered in the basic security levels. These additional features
52789 + include TPE, socket restrictions, and the sysctl system for
52790 + grsecurity. It is advised that you read through the help for
52791 + each option to determine its usefulness in your situation.
52792 +
52793 +endchoice
52794 +
52795 +menu "Address Space Protection"
52796 +depends on GRKERNSEC
52797 +
52798 +config GRKERNSEC_KMEM
52799 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
52800 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52801 + help
52802 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
52803 + be written to via mmap or otherwise to modify the running kernel.
52804 + /dev/port will also not be allowed to be opened. If you have module
52805 + support disabled, enabling this will close up four ways that are
52806 + currently used to insert malicious code into the running kernel.
52807 + Even with all these features enabled, we still highly recommend that
52808 + you use the RBAC system, as it is still possible for an attacker to
52809 + modify the running kernel through privileged I/O granted by ioperm/iopl.
52810 + If you are not using XFree86, you may be able to stop this additional
52811 + case by enabling the 'Disable privileged I/O' option. Though nothing
52812 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52813 + but only to video memory, which is the only writing we allow in this
52814 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
52815 + not be allowed to mprotect it with PROT_WRITE later.
52816 + It is highly recommended that you say Y here if you meet all the
52817 + conditions above.
52818 +
52819 +config GRKERNSEC_VM86
52820 + bool "Restrict VM86 mode"
52821 + depends on X86_32
52822 +
52823 + help
52824 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52825 + make use of a special execution mode on 32bit x86 processors called
52826 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52827 + video cards and will still work with this option enabled. The purpose
52828 + of the option is to prevent exploitation of emulation errors in
52829 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
52830 + Nearly all users should be able to enable this option.
52831 +
52832 +config GRKERNSEC_IO
52833 + bool "Disable privileged I/O"
52834 + depends on X86
52835 + select RTC_CLASS
52836 + select RTC_INTF_DEV
52837 + select RTC_DRV_CMOS
52838 +
52839 + help
52840 + If you say Y here, all ioperm and iopl calls will return an error.
52841 + Ioperm and iopl can be used to modify the running kernel.
52842 + Unfortunately, some programs need this access to operate properly,
52843 + the most notable of which are XFree86 and hwclock. hwclock can be
52844 + remedied by having RTC support in the kernel, so real-time
52845 + clock support is enabled if this option is enabled, to ensure
52846 + that hwclock operates correctly. XFree86 still will not
52847 + operate correctly with this option enabled, so DO NOT CHOOSE Y
52848 + IF YOU USE XFree86. If you use XFree86 and you still want to
52849 + protect your kernel against modification, use the RBAC system.
52850 +
52851 +config GRKERNSEC_PROC_MEMMAP
52852 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
52853 + default y if (PAX_NOEXEC || PAX_ASLR)
52854 + depends on PAX_NOEXEC || PAX_ASLR
52855 + help
52856 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52857 + give no information about the addresses of its mappings if
52858 + PaX features that rely on random addresses are enabled on the task.
52859 + If you use PaX it is greatly recommended that you say Y here as it
52860 + closes up a hole that makes the full ASLR useless for suid
52861 + binaries.
52862 +
52863 +config GRKERNSEC_BRUTE
52864 + bool "Deter exploit bruteforcing"
52865 + help
52866 + If you say Y here, attempts to bruteforce exploits against forking
52867 + daemons such as apache or sshd, as well as against suid/sgid binaries
52868 + will be deterred. When a child of a forking daemon is killed by PaX
52869 + or crashes due to an illegal instruction or other suspicious signal,
52870 + the parent process will be delayed 30 seconds upon every subsequent
52871 + fork until the administrator is able to assess the situation and
52872 + restart the daemon.
52873 + In the suid/sgid case, the attempt is logged, the user has all their
52874 + processes terminated, and they are prevented from executing any further
52875 + processes for 15 minutes.
52876 + It is recommended that you also enable signal logging in the auditing
52877 + section so that logs are generated when a process triggers a suspicious
52878 + signal.
52879 + If the sysctl option is enabled, a sysctl option with name
52880 + "deter_bruteforce" is created.
52881 +
52882 +
52883 +config GRKERNSEC_MODHARDEN
52884 + bool "Harden module auto-loading"
52885 + depends on MODULES
52886 + help
52887 + If you say Y here, module auto-loading in response to use of some
52888 + feature implemented by an unloaded module will be restricted to
52889 + root users. Enabling this option helps defend against attacks
52890 + by unprivileged users who abuse the auto-loading behavior to
52891 + cause a vulnerable module to load that is then exploited.
52892 +
52893 + If this option prevents a legitimate use of auto-loading for a
52894 + non-root user, the administrator can execute modprobe manually
52895 + with the exact name of the module mentioned in the alert log.
52896 + Alternatively, the administrator can add the module to the list
52897 + of modules loaded at boot by modifying init scripts.
52898 +
52899 + Modification of init scripts will most likely be needed on
52900 + Ubuntu servers with encrypted home directory support enabled,
52901 + as the first non-root user logging in will cause the ecb(aes),
52902 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
52903 +
52904 +config GRKERNSEC_HIDESYM
52905 + bool "Hide kernel symbols"
52906 + help
52907 + If you say Y here, getting information on loaded modules, and
52908 + displaying all kernel symbols through a syscall will be restricted
52909 + to users with CAP_SYS_MODULE. For software compatibility reasons,
52910 + /proc/kallsyms will be restricted to the root user. The RBAC
52911 + system can hide that entry even from root.
52912 +
52913 + This option also prevents leaking of kernel addresses through
52914 + several /proc entries.
52915 +
52916 + Note that this option is only effective provided the following
52917 + conditions are met:
52918 + 1) The kernel using grsecurity is not precompiled by some distribution
52919 + 2) You have also enabled GRKERNSEC_DMESG
52920 + 3) You are using the RBAC system and hiding other files such as your
52921 + kernel image and System.map. Alternatively, enabling this option
52922 + causes the permissions on /boot, /lib/modules, and the kernel
52923 + source directory to change at compile time to prevent
52924 + reading by non-root users.
52925 + If the above conditions are met, this option will aid in providing a
52926 + useful protection against local kernel exploitation of overflows
52927 + and arbitrary read/write vulnerabilities.
52928 +
52929 +config GRKERNSEC_KERN_LOCKOUT
52930 + bool "Active kernel exploit response"
52931 + depends on X86 || ARM || PPC || SPARC
52932 + help
52933 + If you say Y here, when a PaX alert is triggered due to suspicious
52934 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52935 + or an OOPs occurs due to bad memory accesses, instead of just
52936 + terminating the offending process (and potentially allowing
52937 + a subsequent exploit from the same user), we will take one of two
52938 + actions:
52939 + If the user was root, we will panic the system
52940 + If the user was non-root, we will log the attempt, terminate
52941 + all processes owned by the user, then prevent them from creating
52942 + any new processes until the system is restarted
52943 + This deters repeated kernel exploitation/bruteforcing attempts
52944 + and is useful for later forensics.
52945 +
52946 +endmenu
52947 +menu "Role Based Access Control Options"
52948 +depends on GRKERNSEC
52949 +
52950 +config GRKERNSEC_RBAC_DEBUG
52951 + bool
52952 +
52953 +config GRKERNSEC_NO_RBAC
52954 + bool "Disable RBAC system"
52955 + help
52956 + If you say Y here, the /dev/grsec device will be removed from the kernel,
52957 + preventing the RBAC system from being enabled. You should only say Y
52958 + here if you have no intention of using the RBAC system, so as to prevent
52959 + an attacker with root access from misusing the RBAC system to hide files
52960 + and processes when loadable module support and /dev/[k]mem have been
52961 + locked down.
52962 +
52963 +config GRKERNSEC_ACL_HIDEKERN
52964 + bool "Hide kernel processes"
52965 + help
52966 + If you say Y here, all kernel threads will be hidden to all
52967 + processes but those whose subject has the "view hidden processes"
52968 + flag.
52969 +
52970 +config GRKERNSEC_ACL_MAXTRIES
52971 + int "Maximum tries before password lockout"
52972 + default 3
52973 + help
52974 + This option enforces the maximum number of times a user can attempt
52975 + to authorize themselves with the grsecurity RBAC system before being
52976 + denied the ability to attempt authorization again for a specified time.
52977 + The lower the number, the harder it will be to brute-force a password.
52978 +
52979 +config GRKERNSEC_ACL_TIMEOUT
52980 + int "Time to wait after max password tries, in seconds"
52981 + default 30
52982 + help
52983 + This option specifies the time the user must wait after attempting to
52984 + authorize to the RBAC system with the maximum number of invalid
52985 + passwords. The higher the number, the harder it will be to brute-force
52986 + a password.
52987 +
52988 +endmenu
52989 +menu "Filesystem Protections"
52990 +depends on GRKERNSEC
52991 +
52992 +config GRKERNSEC_PROC
52993 + bool "Proc restrictions"
52994 + help
52995 + If you say Y here, the permissions of the /proc filesystem
52996 + will be altered to enhance system security and privacy. You MUST
52997 + choose either a user only restriction or a user and group restriction.
52998 + Depending upon the option you choose, you can either restrict users to
52999 + see only the processes they themselves run, or choose a group that can
53000 + view all processes and files normally restricted to root if you choose
53001 + the "restrict to user only" option. NOTE: If you're running identd as
53002 + a non-root user, you will have to run it as the group you specify here.
53003 +
53004 +config GRKERNSEC_PROC_USER
53005 + bool "Restrict /proc to user only"
53006 + depends on GRKERNSEC_PROC
53007 + help
53008 + If you say Y here, non-root users will only be able to view their own
53009 + processes, and will be restricted from viewing network-related information,
53010 + and viewing kernel symbol and module information.
53011 +
53012 +config GRKERNSEC_PROC_USERGROUP
53013 + bool "Allow special group"
53014 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
53015 + help
53016 + If you say Y here, you will be able to select a group that will be
53017 + able to view all processes and network-related information. If you've
53018 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
53019 + remain hidden. This option is useful if you want to run identd as
53020 + a non-root user.
53021 +
53022 +config GRKERNSEC_PROC_GID
53023 + int "GID for special group"
53024 + depends on GRKERNSEC_PROC_USERGROUP
53025 + default 1001
53026 +
53027 +config GRKERNSEC_PROC_ADD
53028 + bool "Additional restrictions"
53029 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
53030 + help
53031 + If you say Y here, additional restrictions will be placed on
53032 + /proc that keep normal users from viewing device information and
53033 + slabinfo information that could be useful for exploits.
53034 +
53035 +config GRKERNSEC_LINK
53036 + bool "Linking restrictions"
53037 + help
53038 + If you say Y here, /tmp race exploits will be prevented, since users
53039 + will no longer be able to follow symlinks owned by other users in
53040 + world-writable +t directories (e.g. /tmp), unless the owner of the
53041 + symlink is the owner of the directory. Users will also not be
53042 + able to hardlink to files they do not own. If the sysctl option is
53043 + enabled, a sysctl option with name "linking_restrictions" is created.
53044 +
53045 +config GRKERNSEC_FIFO
53046 + bool "FIFO restrictions"
53047 + help
53048 + If you say Y here, users will not be able to write to FIFOs they don't
53049 + own in world-writable +t directories (e.g. /tmp), unless the owner of
53050 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
53051 + option is enabled, a sysctl option with name "fifo_restrictions" is
53052 + created.
53053 +
53054 +config GRKERNSEC_SYSFS_RESTRICT
53055 + bool "Sysfs/debugfs restriction"
53056 + depends on SYSFS
53057 + help
53058 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
53059 + any filesystem normally mounted under it (e.g. debugfs) will only
53060 + be accessible by root. These filesystems generally provide access
53061 + to hardware and debug information that isn't appropriate for unprivileged
53062 + users of the system. Sysfs and debugfs have also become a large source
53063 + of new vulnerabilities, ranging from infoleaks to local compromise.
53064 + There has been very little oversight with an eye toward security involved
53065 + in adding new exporters of information to these filesystems, so their
53066 + use is discouraged.
53067 + This option is equivalent to a chmod 0700 of the mount paths.
53068 +
53069 +config GRKERNSEC_ROFS
53070 + bool "Runtime read-only mount protection"
53071 + help
53072 + If you say Y here, a sysctl option with name "romount_protect" will
53073 + be created. By setting this option to 1 at runtime, filesystems
53074 + will be protected in the following ways:
53075 + * No new writable mounts will be allowed
53076 + * Existing read-only mounts won't be able to be remounted read/write
53077 + * Write operations will be denied on all block devices
53078 + This option acts independently of grsec_lock: once it is set to 1,
53079 + it cannot be turned off. Therefore, please be mindful of the resulting
53080 + behavior if this option is enabled in an init script on a read-only
53081 + filesystem. This feature is mainly intended for secure embedded systems.
53082 +
53083 +config GRKERNSEC_CHROOT
53084 + bool "Chroot jail restrictions"
53085 + help
53086 + If you say Y here, you will be able to choose several options that will
53087 + make breaking out of a chrooted jail much more difficult. If you
53088 + encounter no software incompatibilities with the following options, it
53089 + is recommended that you enable each one.
53090 +
53091 +config GRKERNSEC_CHROOT_MOUNT
53092 + bool "Deny mounts"
53093 + depends on GRKERNSEC_CHROOT
53094 + help
53095 + If you say Y here, processes inside a chroot will not be able to
53096 + mount or remount filesystems. If the sysctl option is enabled, a
53097 + sysctl option with name "chroot_deny_mount" is created.
53098 +
53099 +config GRKERNSEC_CHROOT_DOUBLE
53100 + bool "Deny double-chroots"
53101 + depends on GRKERNSEC_CHROOT
53102 + help
53103 + If you say Y here, processes inside a chroot will not be able to chroot
53104 + again outside the chroot. This is a widely used method of breaking
53105 + out of a chroot jail and should not be allowed. If the sysctl
53106 + option is enabled, a sysctl option with name
53107 + "chroot_deny_chroot" is created.
53108 +
53109 +config GRKERNSEC_CHROOT_PIVOT
53110 + bool "Deny pivot_root in chroot"
53111 + depends on GRKERNSEC_CHROOT
53112 + help
53113 + If you say Y here, processes inside a chroot will not be able to use
53114 + a function called pivot_root() that was introduced in Linux 2.3.41. It
53115 + works similarly to chroot in that it changes the root filesystem. This
53116 + function could be misused in a chrooted process to attempt to break out
53117 + of the chroot, and therefore should not be allowed. If the sysctl
53118 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
53119 + created.
53120 +
53121 +config GRKERNSEC_CHROOT_CHDIR
53122 + bool "Enforce chdir(\"/\") on all chroots"
53123 + depends on GRKERNSEC_CHROOT
53124 + help
53125 + If you say Y here, the current working directory of all newly-chrooted
53126 + applications will be set to the root directory of the chroot.
53127 + The man page on chroot(2) states:
53128 + Note that this call does not change the current working
53129 + directory, so that `.' can be outside the tree rooted at
53130 + `/'. In particular, the super-user can escape from a
53131 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
53132 +
53133 + It is recommended that you say Y here, since it's not known to break
53134 + any software. If the sysctl option is enabled, a sysctl option with
53135 + name "chroot_enforce_chdir" is created.
53136 +
53137 +config GRKERNSEC_CHROOT_CHMOD
53138 + bool "Deny (f)chmod +s"
53139 + depends on GRKERNSEC_CHROOT
53140 + help
53141 + If you say Y here, processes inside a chroot will not be able to chmod
53142 + or fchmod files to make them have suid or sgid bits. This protects
53143 + against another published method of breaking a chroot. If the sysctl
53144 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
53145 + created.
53146 +
53147 +config GRKERNSEC_CHROOT_FCHDIR
53148 + bool "Deny fchdir out of chroot"
53149 + depends on GRKERNSEC_CHROOT
53150 + help
53151 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
53152 + to a file descriptor of the chrooting process that points to a directory
53153 + outside the filesystem will be stopped. If the sysctl option
53154 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
53155 +
53156 +config GRKERNSEC_CHROOT_MKNOD
53157 + bool "Deny mknod"
53158 + depends on GRKERNSEC_CHROOT
53159 + help
53160 + If you say Y here, processes inside a chroot will not be allowed to
53161 + mknod. The problem with using mknod inside a chroot is that it
53162 + would allow an attacker to create a device entry that is the same
53163 + as one on the physical root of your system, which could be
53164 + anything from the console device to a device for your hard drive (which
53165 + they could then use to wipe the drive or steal data). It is recommended
53166 + that you say Y here, unless you run into software incompatibilities.
53167 + If the sysctl option is enabled, a sysctl option with name
53168 + "chroot_deny_mknod" is created.
53169 +
53170 +config GRKERNSEC_CHROOT_SHMAT
53171 + bool "Deny shmat() out of chroot"
53172 + depends on GRKERNSEC_CHROOT
53173 + help
53174 + If you say Y here, processes inside a chroot will not be able to attach
53175 + to shared memory segments that were created outside of the chroot jail.
53176 + It is recommended that you say Y here. If the sysctl option is enabled,
53177 + a sysctl option with name "chroot_deny_shmat" is created.
53178 +
53179 +config GRKERNSEC_CHROOT_UNIX
53180 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
53181 + depends on GRKERNSEC_CHROOT
53182 + help
53183 + If you say Y here, processes inside a chroot will not be able to
53184 + connect to abstract (meaning not belonging to a filesystem) Unix
53185 + domain sockets that were bound outside of a chroot. It is recommended
53186 + that you say Y here. If the sysctl option is enabled, a sysctl option
53187 + with name "chroot_deny_unix" is created.
53188 +
53189 +config GRKERNSEC_CHROOT_FINDTASK
53190 + bool "Protect outside processes"
53191 + depends on GRKERNSEC_CHROOT
53192 + help
53193 + If you say Y here, processes inside a chroot will not be able to
53194 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
53195 + getsid, or view any process outside of the chroot. If the sysctl
53196 + option is enabled, a sysctl option with name "chroot_findtask" is
53197 + created.
53198 +
53199 +config GRKERNSEC_CHROOT_NICE
53200 + bool "Restrict priority changes"
53201 + depends on GRKERNSEC_CHROOT
53202 + help
53203 + If you say Y here, processes inside a chroot will not be able to raise
53204 + the priority of processes in the chroot, or alter the priority of
53205 + processes outside the chroot. This provides more security than simply
53206 + removing CAP_SYS_NICE from the process' capability set. If the
53207 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53208 + is created.
53209 +
53210 +config GRKERNSEC_CHROOT_SYSCTL
53211 + bool "Deny sysctl writes"
53212 + depends on GRKERNSEC_CHROOT
53213 + help
53214 + If you say Y here, an attacker in a chroot will not be able to
53215 + write to sysctl entries, either by sysctl(2) or through a /proc
53216 + interface. It is strongly recommended that you say Y here. If the
53217 + sysctl option is enabled, a sysctl option with name
53218 + "chroot_deny_sysctl" is created.
53219 +
53220 +config GRKERNSEC_CHROOT_CAPS
53221 + bool "Capability restrictions"
53222 + depends on GRKERNSEC_CHROOT
53223 + help
53224 + If you say Y here, the capabilities on all root processes within a
53225 + chroot jail will be lowered to stop module insertion, raw i/o,
53226 + system and net admin tasks, rebooting the system, modifying immutable
53227 + files, modifying IPC owned by another, and changing the system time.
53228 + This is left an option because it can break some apps. Disable this
53229 + if your chrooted apps are having problems performing those kinds of
53230 + tasks. If the sysctl option is enabled, a sysctl option with
53231 + name "chroot_caps" is created.
53232 +
53233 +endmenu
53234 +menu "Kernel Auditing"
53235 +depends on GRKERNSEC
53236 +
53237 +config GRKERNSEC_AUDIT_GROUP
53238 + bool "Single group for auditing"
53239 + help
53240 + If you say Y here, the exec, chdir, and (un)mount logging features
53241 + will only operate on a group you specify. This option is recommended
53242 + if you only want to watch certain users instead of having a large
53243 + amount of logs from the entire system. If the sysctl option is enabled,
53244 + a sysctl option with name "audit_group" is created.
53245 +
53246 +config GRKERNSEC_AUDIT_GID
53247 + int "GID for auditing"
53248 + depends on GRKERNSEC_AUDIT_GROUP
53249 + default 1007
53250 +
53251 +config GRKERNSEC_EXECLOG
53252 + bool "Exec logging"
53253 + help
53254 + If you say Y here, all execve() calls will be logged (since the
53255 + other exec*() calls are frontends to execve(), all execution
53256 + will be logged). Useful for shell-servers that like to keep track
53257 + of their users. If the sysctl option is enabled, a sysctl option with
53258 + name "exec_logging" is created.
53259 + WARNING: This option when enabled will produce a LOT of logs, especially
53260 + on an active system.
53261 +
53262 +config GRKERNSEC_RESLOG
53263 + bool "Resource logging"
53264 + help
53265 + If you say Y here, all attempts to overstep resource limits will
53266 + be logged with the resource name, the requested size, and the current
53267 + limit. It is highly recommended that you say Y here. If the sysctl
53268 + option is enabled, a sysctl option with name "resource_logging" is
53269 + created. If the RBAC system is enabled, the sysctl value is ignored.
53270 +
53271 +config GRKERNSEC_CHROOT_EXECLOG
53272 + bool "Log execs within chroot"
53273 + help
53274 + If you say Y here, all executions inside a chroot jail will be logged
53275 + to syslog. This can cause a large amount of logs if certain
53276 + applications (eg. djb's daemontools) are installed on the system, and
53277 + is therefore left as an option. If the sysctl option is enabled, a
53278 + sysctl option with name "chroot_execlog" is created.
53279 +
53280 +config GRKERNSEC_AUDIT_PTRACE
53281 + bool "Ptrace logging"
53282 + help
53283 + If you say Y here, all attempts to attach to a process via ptrace
53284 + will be logged. If the sysctl option is enabled, a sysctl option
53285 + with name "audit_ptrace" is created.
53286 +
53287 +config GRKERNSEC_AUDIT_CHDIR
53288 + bool "Chdir logging"
53289 + help
53290 + If you say Y here, all chdir() calls will be logged. If the sysctl
53291 + option is enabled, a sysctl option with name "audit_chdir" is created.
53292 +
53293 +config GRKERNSEC_AUDIT_MOUNT
53294 + bool "(Un)Mount logging"
53295 + help
53296 + If you say Y here, all mounts and unmounts will be logged. If the
53297 + sysctl option is enabled, a sysctl option with name "audit_mount" is
53298 + created.
53299 +
53300 +config GRKERNSEC_SIGNAL
53301 + bool "Signal logging"
53302 + help
53303 + If you say Y here, certain important signals will be logged, such as
53304 + SIGSEGV, which will as a result inform you of when an error in a program
53305 + occurred, which in some cases could mean a possible exploit attempt.
53306 + If the sysctl option is enabled, a sysctl option with name
53307 + "signal_logging" is created.
53308 +
53309 +config GRKERNSEC_FORKFAIL
53310 + bool "Fork failure logging"
53311 + help
53312 + If you say Y here, all failed fork() attempts will be logged.
53313 + This could suggest a fork bomb, or someone attempting to overstep
53314 + their process limit. If the sysctl option is enabled, a sysctl option
53315 + with name "forkfail_logging" is created.
53316 +
53317 +config GRKERNSEC_TIME
53318 + bool "Time change logging"
53319 + help
53320 + If you say Y here, any changes of the system clock will be logged.
53321 + If the sysctl option is enabled, a sysctl option with name
53322 + "timechange_logging" is created.
53323 +
53324 +config GRKERNSEC_PROC_IPADDR
53325 + bool "/proc/<pid>/ipaddr support"
53326 + help
53327 + If you say Y here, a new entry will be added to each /proc/<pid>
53328 + directory that contains the IP address of the person using the task.
53329 + The IP is carried across local TCP and AF_UNIX stream sockets.
53330 + This information can be useful for IDS/IPSes to perform remote response
53331 + to a local attack. The entry is readable by only the owner of the
53332 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53333 + the RBAC system), and thus does not create privacy concerns.
53334 +
53335 +config GRKERNSEC_RWXMAP_LOG
53336 + bool 'Denied RWX mmap/mprotect logging'
53337 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53338 + help
53339 + If you say Y here, calls to mmap() and mprotect() with explicit
53340 + usage of PROT_WRITE and PROT_EXEC together will be logged when
53341 + denied by the PAX_MPROTECT feature. If the sysctl option is
53342 + enabled, a sysctl option with name "rwxmap_logging" is created.
53343 +
53344 +config GRKERNSEC_AUDIT_TEXTREL
53345 + bool 'ELF text relocations logging (READ HELP)'
53346 + depends on PAX_MPROTECT
53347 + help
53348 + If you say Y here, text relocations will be logged with the filename
53349 + of the offending library or binary. The purpose of the feature is
53350 + to help Linux distribution developers get rid of libraries and
53351 + binaries that need text relocations which hinder the future progress
53352 + of PaX. Only Linux distribution developers should say Y here, and
53353 + never on a production machine, as this option creates an information
53354 + leak that could aid an attacker in defeating the randomization of
53355 + a single memory region. If the sysctl option is enabled, a sysctl
53356 + option with name "audit_textrel" is created.
53357 +
53358 +endmenu
53359 +
53360 +menu "Executable Protections"
53361 +depends on GRKERNSEC
53362 +
53363 +config GRKERNSEC_DMESG
53364 + bool "Dmesg(8) restriction"
53365 + help
53366 + If you say Y here, non-root users will not be able to use dmesg(8)
53367 + to view up to the last 4kb of messages in the kernel's log buffer.
53368 + The kernel's log buffer often contains kernel addresses and other
53369 + identifying information useful to an attacker in fingerprinting a
53370 + system for a targeted exploit.
53371 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
53372 + created.
53373 +
53374 +config GRKERNSEC_HARDEN_PTRACE
53375 + bool "Deter ptrace-based process snooping"
53376 + help
53377 + If you say Y here, TTY sniffers and other malicious monitoring
53378 + programs implemented through ptrace will be defeated. If you
53379 + have been using the RBAC system, this option has already been
53380 + enabled for several years for all users, with the ability to make
53381 + fine-grained exceptions.
53382 +
53383 + This option only affects the ability of non-root users to ptrace
53384 + processes that are not a descendent of the ptracing process.
53385 + This means that strace ./binary and gdb ./binary will still work,
53386 + but attaching to arbitrary processes will not. If the sysctl
53387 + option is enabled, a sysctl option with name "harden_ptrace" is
53388 + created.
53389 +
53390 +config GRKERNSEC_TPE
53391 + bool "Trusted Path Execution (TPE)"
53392 + help
53393 + If you say Y here, you will be able to choose a gid to add to the
53394 + supplementary groups of users you want to mark as "untrusted."
53395 + These users will not be able to execute any files that are not in
53396 + root-owned directories writable only by root. If the sysctl option
53397 + is enabled, a sysctl option with name "tpe" is created.
53398 +
53399 +config GRKERNSEC_TPE_ALL
53400 + bool "Partially restrict all non-root users"
53401 + depends on GRKERNSEC_TPE
53402 + help
53403 + If you say Y here, all non-root users will be covered under
53404 + a weaker TPE restriction. This is separate from, and in addition to,
53405 + the main TPE options that you have selected elsewhere. Thus, if a
53406 + "trusted" GID is chosen, this restriction applies to even that GID.
53407 + Under this restriction, all non-root users will only be allowed to
53408 + execute files in directories they own that are not group or
53409 + world-writable, or in directories owned by root and writable only by
53410 + root. If the sysctl option is enabled, a sysctl option with name
53411 + "tpe_restrict_all" is created.
53412 +
53413 +config GRKERNSEC_TPE_INVERT
53414 + bool "Invert GID option"
53415 + depends on GRKERNSEC_TPE
53416 + help
53417 + If you say Y here, the group you specify in the TPE configuration will
53418 + decide what group TPE restrictions will be *disabled* for. This
53419 + option is useful if you want TPE restrictions to be applied to most
53420 + users on the system. If the sysctl option is enabled, a sysctl option
53421 + with name "tpe_invert" is created. Unlike other sysctl options, this
53422 + entry will default to on for backward-compatibility.
53423 +
53424 +config GRKERNSEC_TPE_GID
53425 + int "GID for untrusted users"
53426 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53427 + default 1005
53428 + help
53429 + Setting this GID determines what group TPE restrictions will be
53430 + *enabled* for. If the sysctl option is enabled, a sysctl option
53431 + with name "tpe_gid" is created.
53432 +
53433 +config GRKERNSEC_TPE_GID
53434 + int "GID for trusted users"
53435 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
53436 + default 1005
53437 + help
53438 + Setting this GID determines what group TPE restrictions will be
53439 + *disabled* for. If the sysctl option is enabled, a sysctl option
53440 + with name "tpe_gid" is created.
53441 +
53442 +endmenu
53443 +menu "Network Protections"
53444 +depends on GRKERNSEC
53445 +
53446 +config GRKERNSEC_RANDNET
53447 + bool "Larger entropy pools"
53448 + help
53449 + If you say Y here, the entropy pools used for many features of Linux
53450 + and grsecurity will be doubled in size. Since several grsecurity
53451 + features use additional randomness, it is recommended that you say Y
53452 + here. Saying Y here has a similar effect as modifying
53453 + /proc/sys/kernel/random/poolsize.
53454 +
53455 +config GRKERNSEC_BLACKHOLE
53456 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53457 + depends on NET
53458 + help
53459 + If you say Y here, neither TCP resets nor ICMP
53460 + destination-unreachable packets will be sent in response to packets
53461 + sent to ports for which no associated listening process exists.
53462 + This feature supports both IPV4 and IPV6 and exempts the
53463 + loopback interface from blackholing. Enabling this feature
53464 + makes a host more resilient to DoS attacks and reduces network
53465 + visibility against scanners.
53466 +
53467 + The blackhole feature as-implemented is equivalent to the FreeBSD
53468 + blackhole feature, as it prevents RST responses to all packets, not
53469 + just SYNs. Under most application behavior this causes no
53470 + problems, but applications (like haproxy) may not close certain
53471 + connections in a way that cleanly terminates them on the remote
53472 + end, leaving the remote host in LAST_ACK state. Because of this
53473 + side-effect and to prevent intentional LAST_ACK DoSes, this
53474 + feature also adds automatic mitigation against such attacks.
53475 + The mitigation drastically reduces the amount of time a socket
53476 + can spend in LAST_ACK state. If you're using haproxy and not
53477 + all servers it connects to have this option enabled, consider
53478 + disabling this feature on the haproxy host.
53479 +
53480 + If the sysctl option is enabled, two sysctl options with names
53481 + "ip_blackhole" and "lastack_retries" will be created.
53482 + While "ip_blackhole" takes the standard zero/non-zero on/off
53483 + toggle, "lastack_retries" uses the same kinds of values as
53484 + "tcp_retries1" and "tcp_retries2". The default value of 4
53485 + prevents a socket from lasting more than 45 seconds in LAST_ACK
53486 + state.
53487 +
53488 +config GRKERNSEC_SOCKET
53489 + bool "Socket restrictions"
53490 + depends on NET
53491 + help
53492 + If you say Y here, you will be able to choose from several options.
53493 + If you assign a GID on your system and add it to the supplementary
53494 + groups of users you want to restrict socket access to, this patch
53495 + will perform up to three things, based on the option(s) you choose.
53496 +
53497 +config GRKERNSEC_SOCKET_ALL
53498 + bool "Deny any sockets to group"
53499 + depends on GRKERNSEC_SOCKET
53500 + help
53501 + If you say Y here, you will be able to choose a GID of whose users will
53502 + be unable to connect to other hosts from your machine or run server
53503 + applications from your machine. If the sysctl option is enabled, a
53504 + sysctl option with name "socket_all" is created.
53505 +
53506 +config GRKERNSEC_SOCKET_ALL_GID
53507 + int "GID to deny all sockets for"
53508 + depends on GRKERNSEC_SOCKET_ALL
53509 + default 1004
53510 + help
53511 + Here you can choose the GID to disable socket access for. Remember to
53512 + add the users you want socket access disabled for to the GID
53513 + specified here. If the sysctl option is enabled, a sysctl option
53514 + with name "socket_all_gid" is created.
53515 +
53516 +config GRKERNSEC_SOCKET_CLIENT
53517 + bool "Deny client sockets to group"
53518 + depends on GRKERNSEC_SOCKET
53519 + help
53520 + If you say Y here, you will be able to choose a GID of whose users will
53521 + be unable to connect to other hosts from your machine, but will be
53522 + able to run servers. If this option is enabled, all users in the group
53523 + you specify will have to use passive mode when initiating ftp transfers
53524 + from the shell on your machine. If the sysctl option is enabled, a
53525 + sysctl option with name "socket_client" is created.
53526 +
53527 +config GRKERNSEC_SOCKET_CLIENT_GID
53528 + int "GID to deny client sockets for"
53529 + depends on GRKERNSEC_SOCKET_CLIENT
53530 + default 1003
53531 + help
53532 + Here you can choose the GID to disable client socket access for.
53533 + Remember to add the users you want client socket access disabled for to
53534 + the GID specified here. If the sysctl option is enabled, a sysctl
53535 + option with name "socket_client_gid" is created.
53536 +
53537 +config GRKERNSEC_SOCKET_SERVER
53538 + bool "Deny server sockets to group"
53539 + depends on GRKERNSEC_SOCKET
53540 + help
53541 + If you say Y here, you will be able to choose a GID of whose users will
53542 + be unable to run server applications from your machine. If the sysctl
53543 + option is enabled, a sysctl option with name "socket_server" is created.
53544 +
53545 +config GRKERNSEC_SOCKET_SERVER_GID
53546 + int "GID to deny server sockets for"
53547 + depends on GRKERNSEC_SOCKET_SERVER
53548 + default 1002
53549 + help
53550 + Here you can choose the GID to disable server socket access for.
53551 + Remember to add the users you want server socket access disabled for to
53552 + the GID specified here. If the sysctl option is enabled, a sysctl
53553 + option with name "socket_server_gid" is created.
53554 +
53555 +endmenu
53556 +menu "Sysctl support"
53557 +depends on GRKERNSEC && SYSCTL
53558 +
53559 +config GRKERNSEC_SYSCTL
53560 + bool "Sysctl support"
53561 + help
53562 + If you say Y here, you will be able to change the options that
53563 + grsecurity runs with at bootup, without having to recompile your
53564 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53565 + to enable (1) or disable (0) various features. All the sysctl entries
53566 + are mutable until the "grsec_lock" entry is set to a non-zero value.
53567 + All features enabled in the kernel configuration are disabled at boot
53568 + if you do not say Y to the "Turn on features by default" option.
53569 + All options should be set at startup, and the grsec_lock entry should
53570 + be set to a non-zero value after all the options are set.
53571 + *THIS IS EXTREMELY IMPORTANT*
53572 +
53573 +config GRKERNSEC_SYSCTL_DISTRO
53574 + bool "Extra sysctl support for distro makers (READ HELP)"
53575 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53576 + help
53577 + If you say Y here, additional sysctl options will be created
53578 + for features that affect processes running as root. Therefore,
53579 + it is critical when using this option that the grsec_lock entry be
53580 + enabled after boot. Only distros with prebuilt kernel packages
53581 + with this option enabled that can ensure grsec_lock is enabled
53582 + after boot should use this option.
53583 + *Failure to set grsec_lock after boot makes all grsec features
53584 + this option covers useless*
53585 +
53586 + Currently this option creates the following sysctl entries:
53587 + "Disable Privileged I/O": "disable_priv_io"
53588 +
53589 +config GRKERNSEC_SYSCTL_ON
53590 + bool "Turn on features by default"
53591 + depends on GRKERNSEC_SYSCTL
53592 + help
53593 + If you say Y here, instead of having all features enabled in the
53594 + kernel configuration disabled at boot time, the features will be
53595 + enabled at boot time. It is recommended you say Y here unless
53596 + there is some reason you would want all sysctl-tunable features to
53597 + be disabled by default. As mentioned elsewhere, it is important
53598 + to enable the grsec_lock entry once you have finished modifying
53599 + the sysctl entries.
53600 +
53601 +endmenu
53602 +menu "Logging Options"
53603 +depends on GRKERNSEC
53604 +
53605 +config GRKERNSEC_FLOODTIME
53606 + int "Seconds in between log messages (minimum)"
53607 + default 10
53608 + help
53609 + This option allows you to enforce the number of seconds between
53610 + grsecurity log messages. The default should be suitable for most
53611 + people, however, if you choose to change it, choose a value small enough
53612 + to allow informative logs to be produced, but large enough to
53613 + prevent flooding.
53614 +
53615 +config GRKERNSEC_FLOODBURST
53616 + int "Number of messages in a burst (maximum)"
53617 + default 4
53618 + help
53619 + This option allows you to choose the maximum number of messages allowed
53620 + within the flood time interval you chose in a separate option. The
53621 + default should be suitable for most people, however if you find that
53622 + many of your logs are being interpreted as flooding, you may want to
53623 + raise this value.
53624 +
53625 +endmenu
53626 +
53627 +endmenu
53628 diff -urNp linux-3.0.4/grsecurity/Makefile linux-3.0.4/grsecurity/Makefile
53629 --- linux-3.0.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
53630 +++ linux-3.0.4/grsecurity/Makefile 2011-08-23 21:48:14.000000000 -0400
53631 @@ -0,0 +1,34 @@
53632 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53633 +# during 2001-2009 it has been completely redesigned by Brad Spengler
53634 +# into an RBAC system
53635 +#
53636 +# All code in this directory and various hooks inserted throughout the kernel
53637 +# are copyright Brad Spengler - Open Source Security, Inc., and released
53638 +# under the GPL v2 or higher
53639 +
53640 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53641 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
53642 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53643 +
53644 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53645 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53646 + gracl_learn.o grsec_log.o
53647 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53648 +
53649 +ifdef CONFIG_NET
53650 +obj-y += grsec_sock.o
53651 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53652 +endif
53653 +
53654 +ifndef CONFIG_GRKERNSEC
53655 +obj-y += grsec_disabled.o
53656 +endif
53657 +
53658 +ifdef CONFIG_GRKERNSEC_HIDESYM
53659 +extra-y := grsec_hidesym.o
53660 +$(obj)/grsec_hidesym.o:
53661 + @-chmod -f 500 /boot
53662 + @-chmod -f 500 /lib/modules
53663 + @-chmod -f 700 .
53664 + @echo ' grsec: protected kernel image paths'
53665 +endif
53666 diff -urNp linux-3.0.4/include/acpi/acpi_bus.h linux-3.0.4/include/acpi/acpi_bus.h
53667 --- linux-3.0.4/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
53668 +++ linux-3.0.4/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
53669 @@ -107,7 +107,7 @@ struct acpi_device_ops {
53670 acpi_op_bind bind;
53671 acpi_op_unbind unbind;
53672 acpi_op_notify notify;
53673 -};
53674 +} __no_const;
53675
53676 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
53677
53678 diff -urNp linux-3.0.4/include/asm-generic/atomic-long.h linux-3.0.4/include/asm-generic/atomic-long.h
53679 --- linux-3.0.4/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
53680 +++ linux-3.0.4/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
53681 @@ -22,6 +22,12 @@
53682
53683 typedef atomic64_t atomic_long_t;
53684
53685 +#ifdef CONFIG_PAX_REFCOUNT
53686 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
53687 +#else
53688 +typedef atomic64_t atomic_long_unchecked_t;
53689 +#endif
53690 +
53691 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
53692
53693 static inline long atomic_long_read(atomic_long_t *l)
53694 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
53695 return (long)atomic64_read(v);
53696 }
53697
53698 +#ifdef CONFIG_PAX_REFCOUNT
53699 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53700 +{
53701 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53702 +
53703 + return (long)atomic64_read_unchecked(v);
53704 +}
53705 +#endif
53706 +
53707 static inline void atomic_long_set(atomic_long_t *l, long i)
53708 {
53709 atomic64_t *v = (atomic64_t *)l;
53710 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
53711 atomic64_set(v, i);
53712 }
53713
53714 +#ifdef CONFIG_PAX_REFCOUNT
53715 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53716 +{
53717 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53718 +
53719 + atomic64_set_unchecked(v, i);
53720 +}
53721 +#endif
53722 +
53723 static inline void atomic_long_inc(atomic_long_t *l)
53724 {
53725 atomic64_t *v = (atomic64_t *)l;
53726 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
53727 atomic64_inc(v);
53728 }
53729
53730 +#ifdef CONFIG_PAX_REFCOUNT
53731 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53732 +{
53733 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53734 +
53735 + atomic64_inc_unchecked(v);
53736 +}
53737 +#endif
53738 +
53739 static inline void atomic_long_dec(atomic_long_t *l)
53740 {
53741 atomic64_t *v = (atomic64_t *)l;
53742 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
53743 atomic64_dec(v);
53744 }
53745
53746 +#ifdef CONFIG_PAX_REFCOUNT
53747 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53748 +{
53749 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53750 +
53751 + atomic64_dec_unchecked(v);
53752 +}
53753 +#endif
53754 +
53755 static inline void atomic_long_add(long i, atomic_long_t *l)
53756 {
53757 atomic64_t *v = (atomic64_t *)l;
53758 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
53759 atomic64_add(i, v);
53760 }
53761
53762 +#ifdef CONFIG_PAX_REFCOUNT
53763 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53764 +{
53765 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53766 +
53767 + atomic64_add_unchecked(i, v);
53768 +}
53769 +#endif
53770 +
53771 static inline void atomic_long_sub(long i, atomic_long_t *l)
53772 {
53773 atomic64_t *v = (atomic64_t *)l;
53774 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
53775 atomic64_sub(i, v);
53776 }
53777
53778 +#ifdef CONFIG_PAX_REFCOUNT
53779 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
53780 +{
53781 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53782 +
53783 + atomic64_sub_unchecked(i, v);
53784 +}
53785 +#endif
53786 +
53787 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
53788 {
53789 atomic64_t *v = (atomic64_t *)l;
53790 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
53791 return (long)atomic64_inc_return(v);
53792 }
53793
53794 +#ifdef CONFIG_PAX_REFCOUNT
53795 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53796 +{
53797 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53798 +
53799 + return (long)atomic64_inc_return_unchecked(v);
53800 +}
53801 +#endif
53802 +
53803 static inline long atomic_long_dec_return(atomic_long_t *l)
53804 {
53805 atomic64_t *v = (atomic64_t *)l;
53806 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
53807
53808 typedef atomic_t atomic_long_t;
53809
53810 +#ifdef CONFIG_PAX_REFCOUNT
53811 +typedef atomic_unchecked_t atomic_long_unchecked_t;
53812 +#else
53813 +typedef atomic_t atomic_long_unchecked_t;
53814 +#endif
53815 +
53816 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
53817 static inline long atomic_long_read(atomic_long_t *l)
53818 {
53819 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
53820 return (long)atomic_read(v);
53821 }
53822
53823 +#ifdef CONFIG_PAX_REFCOUNT
53824 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53825 +{
53826 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53827 +
53828 + return (long)atomic_read_unchecked(v);
53829 +}
53830 +#endif
53831 +
53832 static inline void atomic_long_set(atomic_long_t *l, long i)
53833 {
53834 atomic_t *v = (atomic_t *)l;
53835 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
53836 atomic_set(v, i);
53837 }
53838
53839 +#ifdef CONFIG_PAX_REFCOUNT
53840 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53841 +{
53842 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53843 +
53844 + atomic_set_unchecked(v, i);
53845 +}
53846 +#endif
53847 +
53848 static inline void atomic_long_inc(atomic_long_t *l)
53849 {
53850 atomic_t *v = (atomic_t *)l;
53851 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
53852 atomic_inc(v);
53853 }
53854
53855 +#ifdef CONFIG_PAX_REFCOUNT
53856 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53857 +{
53858 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53859 +
53860 + atomic_inc_unchecked(v);
53861 +}
53862 +#endif
53863 +
53864 static inline void atomic_long_dec(atomic_long_t *l)
53865 {
53866 atomic_t *v = (atomic_t *)l;
53867 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
53868 atomic_dec(v);
53869 }
53870
53871 +#ifdef CONFIG_PAX_REFCOUNT
53872 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53873 +{
53874 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53875 +
53876 + atomic_dec_unchecked(v);
53877 +}
53878 +#endif
53879 +
53880 static inline void atomic_long_add(long i, atomic_long_t *l)
53881 {
53882 atomic_t *v = (atomic_t *)l;
53883 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long
53884 atomic_add(i, v);
53885 }
53886
53887 +#ifdef CONFIG_PAX_REFCOUNT
53888 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53889 +{
53890 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53891 +
53892 + atomic_add_unchecked(i, v);
53893 +}
53894 +#endif
53895 +
53896 static inline void atomic_long_sub(long i, atomic_long_t *l)
53897 {
53898 atomic_t *v = (atomic_t *)l;
53899 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
53900 atomic_sub(i, v);
53901 }
53902
53903 +#ifdef CONFIG_PAX_REFCOUNT
53904 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
53905 +{
53906 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53907 +
53908 + atomic_sub_unchecked(i, v);
53909 +}
53910 +#endif
53911 +
53912 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
53913 {
53914 atomic_t *v = (atomic_t *)l;
53915 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
53916 return (long)atomic_inc_return(v);
53917 }
53918
53919 +#ifdef CONFIG_PAX_REFCOUNT
53920 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53921 +{
53922 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53923 +
53924 + return (long)atomic_inc_return_unchecked(v);
53925 +}
53926 +#endif
53927 +
53928 static inline long atomic_long_dec_return(atomic_long_t *l)
53929 {
53930 atomic_t *v = (atomic_t *)l;
53931 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
53932
53933 #endif /* BITS_PER_LONG == 64 */
53934
53935 +#ifdef CONFIG_PAX_REFCOUNT
53936 +static inline void pax_refcount_needs_these_functions(void)
53937 +{
53938 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
53939 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
53940 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
53941 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
53942 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
53943 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
53944 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
53945 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
53946 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
53947 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
53948 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
53949 +
53950 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
53951 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
53952 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
53953 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
53954 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
53955 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
53956 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
53957 +}
53958 +#else
53959 +#define atomic_read_unchecked(v) atomic_read(v)
53960 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
53961 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
53962 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
53963 +#define atomic_inc_unchecked(v) atomic_inc(v)
53964 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
53965 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
53966 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
53967 +#define atomic_dec_unchecked(v) atomic_dec(v)
53968 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
53969 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
53970 +
53971 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
53972 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
53973 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
53974 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
53975 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
53976 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
53977 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
53978 +#endif
53979 +
53980 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
53981 diff -urNp linux-3.0.4/include/asm-generic/cache.h linux-3.0.4/include/asm-generic/cache.h
53982 --- linux-3.0.4/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
53983 +++ linux-3.0.4/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
53984 @@ -6,7 +6,7 @@
53985 * cache lines need to provide their own cache.h.
53986 */
53987
53988 -#define L1_CACHE_SHIFT 5
53989 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
53990 +#define L1_CACHE_SHIFT 5UL
53991 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
53992
53993 #endif /* __ASM_GENERIC_CACHE_H */
53994 diff -urNp linux-3.0.4/include/asm-generic/int-l64.h linux-3.0.4/include/asm-generic/int-l64.h
53995 --- linux-3.0.4/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
53996 +++ linux-3.0.4/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
53997 @@ -46,6 +46,8 @@ typedef unsigned int u32;
53998 typedef signed long s64;
53999 typedef unsigned long u64;
54000
54001 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
54002 +
54003 #define S8_C(x) x
54004 #define U8_C(x) x ## U
54005 #define S16_C(x) x
54006 diff -urNp linux-3.0.4/include/asm-generic/int-ll64.h linux-3.0.4/include/asm-generic/int-ll64.h
54007 --- linux-3.0.4/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
54008 +++ linux-3.0.4/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
54009 @@ -51,6 +51,8 @@ typedef unsigned int u32;
54010 typedef signed long long s64;
54011 typedef unsigned long long u64;
54012
54013 +typedef unsigned long long intoverflow_t;
54014 +
54015 #define S8_C(x) x
54016 #define U8_C(x) x ## U
54017 #define S16_C(x) x
54018 diff -urNp linux-3.0.4/include/asm-generic/kmap_types.h linux-3.0.4/include/asm-generic/kmap_types.h
54019 --- linux-3.0.4/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
54020 +++ linux-3.0.4/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
54021 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
54022 KMAP_D(17) KM_NMI,
54023 KMAP_D(18) KM_NMI_PTE,
54024 KMAP_D(19) KM_KDB,
54025 +KMAP_D(20) KM_CLEARPAGE,
54026 /*
54027 * Remember to update debug_kmap_atomic() when adding new kmap types!
54028 */
54029 -KMAP_D(20) KM_TYPE_NR
54030 +KMAP_D(21) KM_TYPE_NR
54031 };
54032
54033 #undef KMAP_D
54034 diff -urNp linux-3.0.4/include/asm-generic/pgtable.h linux-3.0.4/include/asm-generic/pgtable.h
54035 --- linux-3.0.4/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
54036 +++ linux-3.0.4/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
54037 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
54038 #endif /* __HAVE_ARCH_PMD_WRITE */
54039 #endif
54040
54041 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
54042 +static inline unsigned long pax_open_kernel(void) { return 0; }
54043 +#endif
54044 +
54045 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
54046 +static inline unsigned long pax_close_kernel(void) { return 0; }
54047 +#endif
54048 +
54049 #endif /* !__ASSEMBLY__ */
54050
54051 #endif /* _ASM_GENERIC_PGTABLE_H */
54052 diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopmd.h linux-3.0.4/include/asm-generic/pgtable-nopmd.h
54053 --- linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
54054 +++ linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
54055 @@ -1,14 +1,19 @@
54056 #ifndef _PGTABLE_NOPMD_H
54057 #define _PGTABLE_NOPMD_H
54058
54059 -#ifndef __ASSEMBLY__
54060 -
54061 #include <asm-generic/pgtable-nopud.h>
54062
54063 -struct mm_struct;
54064 -
54065 #define __PAGETABLE_PMD_FOLDED
54066
54067 +#define PMD_SHIFT PUD_SHIFT
54068 +#define PTRS_PER_PMD 1
54069 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
54070 +#define PMD_MASK (~(PMD_SIZE-1))
54071 +
54072 +#ifndef __ASSEMBLY__
54073 +
54074 +struct mm_struct;
54075 +
54076 /*
54077 * Having the pmd type consist of a pud gets the size right, and allows
54078 * us to conceptually access the pud entry that this pmd is folded into
54079 @@ -16,11 +21,6 @@ struct mm_struct;
54080 */
54081 typedef struct { pud_t pud; } pmd_t;
54082
54083 -#define PMD_SHIFT PUD_SHIFT
54084 -#define PTRS_PER_PMD 1
54085 -#define PMD_SIZE (1UL << PMD_SHIFT)
54086 -#define PMD_MASK (~(PMD_SIZE-1))
54087 -
54088 /*
54089 * The "pud_xxx()" functions here are trivial for a folded two-level
54090 * setup: the pmd is never bad, and a pmd always exists (as it's folded
54091 diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopud.h linux-3.0.4/include/asm-generic/pgtable-nopud.h
54092 --- linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
54093 +++ linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
54094 @@ -1,10 +1,15 @@
54095 #ifndef _PGTABLE_NOPUD_H
54096 #define _PGTABLE_NOPUD_H
54097
54098 -#ifndef __ASSEMBLY__
54099 -
54100 #define __PAGETABLE_PUD_FOLDED
54101
54102 +#define PUD_SHIFT PGDIR_SHIFT
54103 +#define PTRS_PER_PUD 1
54104 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
54105 +#define PUD_MASK (~(PUD_SIZE-1))
54106 +
54107 +#ifndef __ASSEMBLY__
54108 +
54109 /*
54110 * Having the pud type consist of a pgd gets the size right, and allows
54111 * us to conceptually access the pgd entry that this pud is folded into
54112 @@ -12,11 +17,6 @@
54113 */
54114 typedef struct { pgd_t pgd; } pud_t;
54115
54116 -#define PUD_SHIFT PGDIR_SHIFT
54117 -#define PTRS_PER_PUD 1
54118 -#define PUD_SIZE (1UL << PUD_SHIFT)
54119 -#define PUD_MASK (~(PUD_SIZE-1))
54120 -
54121 /*
54122 * The "pgd_xxx()" functions here are trivial for a folded two-level
54123 * setup: the pud is never bad, and a pud always exists (as it's folded
54124 diff -urNp linux-3.0.4/include/asm-generic/vmlinux.lds.h linux-3.0.4/include/asm-generic/vmlinux.lds.h
54125 --- linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
54126 +++ linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
54127 @@ -217,6 +217,7 @@
54128 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
54129 VMLINUX_SYMBOL(__start_rodata) = .; \
54130 *(.rodata) *(.rodata.*) \
54131 + *(.data..read_only) \
54132 *(__vermagic) /* Kernel version magic */ \
54133 . = ALIGN(8); \
54134 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
54135 @@ -723,17 +724,18 @@
54136 * section in the linker script will go there too. @phdr should have
54137 * a leading colon.
54138 *
54139 - * Note that this macros defines __per_cpu_load as an absolute symbol.
54140 + * Note that this macros defines per_cpu_load as an absolute symbol.
54141 * If there is no need to put the percpu section at a predetermined
54142 * address, use PERCPU_SECTION.
54143 */
54144 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
54145 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
54146 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
54147 + per_cpu_load = .; \
54148 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
54149 - LOAD_OFFSET) { \
54150 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
54151 PERCPU_INPUT(cacheline) \
54152 } phdr \
54153 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
54154 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
54155
54156 /**
54157 * PERCPU_SECTION - define output section for percpu area, simple version
54158 diff -urNp linux-3.0.4/include/drm/drm_crtc_helper.h linux-3.0.4/include/drm/drm_crtc_helper.h
54159 --- linux-3.0.4/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
54160 +++ linux-3.0.4/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
54161 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
54162
54163 /* disable crtc when not in use - more explicit than dpms off */
54164 void (*disable)(struct drm_crtc *crtc);
54165 -};
54166 +} __no_const;
54167
54168 struct drm_encoder_helper_funcs {
54169 void (*dpms)(struct drm_encoder *encoder, int mode);
54170 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
54171 struct drm_connector *connector);
54172 /* disable encoder when not in use - more explicit than dpms off */
54173 void (*disable)(struct drm_encoder *encoder);
54174 -};
54175 +} __no_const;
54176
54177 struct drm_connector_helper_funcs {
54178 int (*get_modes)(struct drm_connector *connector);
54179 diff -urNp linux-3.0.4/include/drm/drmP.h linux-3.0.4/include/drm/drmP.h
54180 --- linux-3.0.4/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
54181 +++ linux-3.0.4/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
54182 @@ -73,6 +73,7 @@
54183 #include <linux/workqueue.h>
54184 #include <linux/poll.h>
54185 #include <asm/pgalloc.h>
54186 +#include <asm/local.h>
54187 #include "drm.h"
54188
54189 #include <linux/idr.h>
54190 @@ -1033,7 +1034,7 @@ struct drm_device {
54191
54192 /** \name Usage Counters */
54193 /*@{ */
54194 - int open_count; /**< Outstanding files open */
54195 + local_t open_count; /**< Outstanding files open */
54196 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54197 atomic_t vma_count; /**< Outstanding vma areas open */
54198 int buf_use; /**< Buffers in use -- cannot alloc */
54199 @@ -1044,7 +1045,7 @@ struct drm_device {
54200 /*@{ */
54201 unsigned long counters;
54202 enum drm_stat_type types[15];
54203 - atomic_t counts[15];
54204 + atomic_unchecked_t counts[15];
54205 /*@} */
54206
54207 struct list_head filelist;
54208 diff -urNp linux-3.0.4/include/drm/ttm/ttm_memory.h linux-3.0.4/include/drm/ttm/ttm_memory.h
54209 --- linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
54210 +++ linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
54211 @@ -47,7 +47,7 @@
54212
54213 struct ttm_mem_shrink {
54214 int (*do_shrink) (struct ttm_mem_shrink *);
54215 -};
54216 +} __no_const;
54217
54218 /**
54219 * struct ttm_mem_global - Global memory accounting structure.
54220 diff -urNp linux-3.0.4/include/linux/a.out.h linux-3.0.4/include/linux/a.out.h
54221 --- linux-3.0.4/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
54222 +++ linux-3.0.4/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
54223 @@ -39,6 +39,14 @@ enum machine_type {
54224 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54225 };
54226
54227 +/* Constants for the N_FLAGS field */
54228 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54229 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54230 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54231 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54232 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54233 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54234 +
54235 #if !defined (N_MAGIC)
54236 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54237 #endif
54238 diff -urNp linux-3.0.4/include/linux/atmdev.h linux-3.0.4/include/linux/atmdev.h
54239 --- linux-3.0.4/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
54240 +++ linux-3.0.4/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
54241 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54242 #endif
54243
54244 struct k_atm_aal_stats {
54245 -#define __HANDLE_ITEM(i) atomic_t i
54246 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54247 __AAL_STAT_ITEMS
54248 #undef __HANDLE_ITEM
54249 };
54250 diff -urNp linux-3.0.4/include/linux/binfmts.h linux-3.0.4/include/linux/binfmts.h
54251 --- linux-3.0.4/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
54252 +++ linux-3.0.4/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
54253 @@ -88,6 +88,7 @@ struct linux_binfmt {
54254 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54255 int (*load_shlib)(struct file *);
54256 int (*core_dump)(struct coredump_params *cprm);
54257 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54258 unsigned long min_coredump; /* minimal dump size */
54259 };
54260
54261 diff -urNp linux-3.0.4/include/linux/blkdev.h linux-3.0.4/include/linux/blkdev.h
54262 --- linux-3.0.4/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
54263 +++ linux-3.0.4/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
54264 @@ -1308,7 +1308,7 @@ struct block_device_operations {
54265 /* this callback is with swap_lock and sometimes page table lock held */
54266 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
54267 struct module *owner;
54268 -};
54269 +} __do_const;
54270
54271 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54272 unsigned long);
54273 diff -urNp linux-3.0.4/include/linux/blktrace_api.h linux-3.0.4/include/linux/blktrace_api.h
54274 --- linux-3.0.4/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
54275 +++ linux-3.0.4/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
54276 @@ -161,7 +161,7 @@ struct blk_trace {
54277 struct dentry *dir;
54278 struct dentry *dropped_file;
54279 struct dentry *msg_file;
54280 - atomic_t dropped;
54281 + atomic_unchecked_t dropped;
54282 };
54283
54284 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54285 diff -urNp linux-3.0.4/include/linux/byteorder/little_endian.h linux-3.0.4/include/linux/byteorder/little_endian.h
54286 --- linux-3.0.4/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
54287 +++ linux-3.0.4/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
54288 @@ -42,51 +42,51 @@
54289
54290 static inline __le64 __cpu_to_le64p(const __u64 *p)
54291 {
54292 - return (__force __le64)*p;
54293 + return (__force const __le64)*p;
54294 }
54295 static inline __u64 __le64_to_cpup(const __le64 *p)
54296 {
54297 - return (__force __u64)*p;
54298 + return (__force const __u64)*p;
54299 }
54300 static inline __le32 __cpu_to_le32p(const __u32 *p)
54301 {
54302 - return (__force __le32)*p;
54303 + return (__force const __le32)*p;
54304 }
54305 static inline __u32 __le32_to_cpup(const __le32 *p)
54306 {
54307 - return (__force __u32)*p;
54308 + return (__force const __u32)*p;
54309 }
54310 static inline __le16 __cpu_to_le16p(const __u16 *p)
54311 {
54312 - return (__force __le16)*p;
54313 + return (__force const __le16)*p;
54314 }
54315 static inline __u16 __le16_to_cpup(const __le16 *p)
54316 {
54317 - return (__force __u16)*p;
54318 + return (__force const __u16)*p;
54319 }
54320 static inline __be64 __cpu_to_be64p(const __u64 *p)
54321 {
54322 - return (__force __be64)__swab64p(p);
54323 + return (__force const __be64)__swab64p(p);
54324 }
54325 static inline __u64 __be64_to_cpup(const __be64 *p)
54326 {
54327 - return __swab64p((__u64 *)p);
54328 + return __swab64p((const __u64 *)p);
54329 }
54330 static inline __be32 __cpu_to_be32p(const __u32 *p)
54331 {
54332 - return (__force __be32)__swab32p(p);
54333 + return (__force const __be32)__swab32p(p);
54334 }
54335 static inline __u32 __be32_to_cpup(const __be32 *p)
54336 {
54337 - return __swab32p((__u32 *)p);
54338 + return __swab32p((const __u32 *)p);
54339 }
54340 static inline __be16 __cpu_to_be16p(const __u16 *p)
54341 {
54342 - return (__force __be16)__swab16p(p);
54343 + return (__force const __be16)__swab16p(p);
54344 }
54345 static inline __u16 __be16_to_cpup(const __be16 *p)
54346 {
54347 - return __swab16p((__u16 *)p);
54348 + return __swab16p((const __u16 *)p);
54349 }
54350 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54351 #define __le64_to_cpus(x) do { (void)(x); } while (0)
54352 diff -urNp linux-3.0.4/include/linux/cache.h linux-3.0.4/include/linux/cache.h
54353 --- linux-3.0.4/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
54354 +++ linux-3.0.4/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
54355 @@ -16,6 +16,10 @@
54356 #define __read_mostly
54357 #endif
54358
54359 +#ifndef __read_only
54360 +#define __read_only __read_mostly
54361 +#endif
54362 +
54363 #ifndef ____cacheline_aligned
54364 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54365 #endif
54366 diff -urNp linux-3.0.4/include/linux/capability.h linux-3.0.4/include/linux/capability.h
54367 --- linux-3.0.4/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
54368 +++ linux-3.0.4/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
54369 @@ -547,6 +547,9 @@ extern bool capable(int cap);
54370 extern bool ns_capable(struct user_namespace *ns, int cap);
54371 extern bool task_ns_capable(struct task_struct *t, int cap);
54372 extern bool nsown_capable(int cap);
54373 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
54374 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
54375 +extern bool capable_nolog(int cap);
54376
54377 /* audit system wants to get cap info from files as well */
54378 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
54379 diff -urNp linux-3.0.4/include/linux/cleancache.h linux-3.0.4/include/linux/cleancache.h
54380 --- linux-3.0.4/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
54381 +++ linux-3.0.4/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
54382 @@ -31,7 +31,7 @@ struct cleancache_ops {
54383 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
54384 void (*flush_inode)(int, struct cleancache_filekey);
54385 void (*flush_fs)(int);
54386 -};
54387 +} __no_const;
54388
54389 extern struct cleancache_ops
54390 cleancache_register_ops(struct cleancache_ops *ops);
54391 diff -urNp linux-3.0.4/include/linux/compiler-gcc4.h linux-3.0.4/include/linux/compiler-gcc4.h
54392 --- linux-3.0.4/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
54393 +++ linux-3.0.4/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
54394 @@ -31,6 +31,12 @@
54395
54396
54397 #if __GNUC_MINOR__ >= 5
54398 +
54399 +#ifdef CONSTIFY_PLUGIN
54400 +#define __no_const __attribute__((no_const))
54401 +#define __do_const __attribute__((do_const))
54402 +#endif
54403 +
54404 /*
54405 * Mark a position in code as unreachable. This can be used to
54406 * suppress control flow warnings after asm blocks that transfer
54407 @@ -46,6 +52,11 @@
54408 #define __noclone __attribute__((__noclone__))
54409
54410 #endif
54411 +
54412 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
54413 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
54414 +#define __bos0(ptr) __bos((ptr), 0)
54415 +#define __bos1(ptr) __bos((ptr), 1)
54416 #endif
54417
54418 #if __GNUC_MINOR__ > 0
54419 diff -urNp linux-3.0.4/include/linux/compiler.h linux-3.0.4/include/linux/compiler.h
54420 --- linux-3.0.4/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
54421 +++ linux-3.0.4/include/linux/compiler.h 2011-08-26 19:49:56.000000000 -0400
54422 @@ -264,6 +264,14 @@ void ftrace_likely_update(struct ftrace_
54423 # define __attribute_const__ /* unimplemented */
54424 #endif
54425
54426 +#ifndef __no_const
54427 +# define __no_const
54428 +#endif
54429 +
54430 +#ifndef __do_const
54431 +# define __do_const
54432 +#endif
54433 +
54434 /*
54435 * Tell gcc if a function is cold. The compiler will assume any path
54436 * directly leading to the call is unlikely.
54437 @@ -273,6 +281,22 @@ void ftrace_likely_update(struct ftrace_
54438 #define __cold
54439 #endif
54440
54441 +#ifndef __alloc_size
54442 +#define __alloc_size(...)
54443 +#endif
54444 +
54445 +#ifndef __bos
54446 +#define __bos(ptr, arg)
54447 +#endif
54448 +
54449 +#ifndef __bos0
54450 +#define __bos0(ptr)
54451 +#endif
54452 +
54453 +#ifndef __bos1
54454 +#define __bos1(ptr)
54455 +#endif
54456 +
54457 /* Simple shorthand for a section definition */
54458 #ifndef __section
54459 # define __section(S) __attribute__ ((__section__(#S)))
54460 @@ -306,6 +330,7 @@ void ftrace_likely_update(struct ftrace_
54461 * use is to mediate communication between process-level code and irq/NMI
54462 * handlers, all running on the same CPU.
54463 */
54464 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
54465 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
54466 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
54467
54468 #endif /* __LINUX_COMPILER_H */
54469 diff -urNp linux-3.0.4/include/linux/cpuset.h linux-3.0.4/include/linux/cpuset.h
54470 --- linux-3.0.4/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
54471 +++ linux-3.0.4/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
54472 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
54473 * nodemask.
54474 */
54475 smp_mb();
54476 - --ACCESS_ONCE(current->mems_allowed_change_disable);
54477 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
54478 }
54479
54480 static inline void set_mems_allowed(nodemask_t nodemask)
54481 diff -urNp linux-3.0.4/include/linux/crypto.h linux-3.0.4/include/linux/crypto.h
54482 --- linux-3.0.4/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
54483 +++ linux-3.0.4/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
54484 @@ -361,7 +361,7 @@ struct cipher_tfm {
54485 const u8 *key, unsigned int keylen);
54486 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
54487 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
54488 -};
54489 +} __no_const;
54490
54491 struct hash_tfm {
54492 int (*init)(struct hash_desc *desc);
54493 @@ -382,13 +382,13 @@ struct compress_tfm {
54494 int (*cot_decompress)(struct crypto_tfm *tfm,
54495 const u8 *src, unsigned int slen,
54496 u8 *dst, unsigned int *dlen);
54497 -};
54498 +} __no_const;
54499
54500 struct rng_tfm {
54501 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
54502 unsigned int dlen);
54503 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
54504 -};
54505 +} __no_const;
54506
54507 #define crt_ablkcipher crt_u.ablkcipher
54508 #define crt_aead crt_u.aead
54509 diff -urNp linux-3.0.4/include/linux/decompress/mm.h linux-3.0.4/include/linux/decompress/mm.h
54510 --- linux-3.0.4/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
54511 +++ linux-3.0.4/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
54512 @@ -77,7 +77,7 @@ static void free(void *where)
54513 * warnings when not needed (indeed large_malloc / large_free are not
54514 * needed by inflate */
54515
54516 -#define malloc(a) kmalloc(a, GFP_KERNEL)
54517 +#define malloc(a) kmalloc((a), GFP_KERNEL)
54518 #define free(a) kfree(a)
54519
54520 #define large_malloc(a) vmalloc(a)
54521 diff -urNp linux-3.0.4/include/linux/dma-mapping.h linux-3.0.4/include/linux/dma-mapping.h
54522 --- linux-3.0.4/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
54523 +++ linux-3.0.4/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
54524 @@ -50,7 +50,7 @@ struct dma_map_ops {
54525 int (*dma_supported)(struct device *dev, u64 mask);
54526 int (*set_dma_mask)(struct device *dev, u64 mask);
54527 int is_phys;
54528 -};
54529 +} __do_const;
54530
54531 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
54532
54533 diff -urNp linux-3.0.4/include/linux/efi.h linux-3.0.4/include/linux/efi.h
54534 --- linux-3.0.4/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
54535 +++ linux-3.0.4/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
54536 @@ -410,7 +410,7 @@ struct efivar_operations {
54537 efi_get_variable_t *get_variable;
54538 efi_get_next_variable_t *get_next_variable;
54539 efi_set_variable_t *set_variable;
54540 -};
54541 +} __no_const;
54542
54543 struct efivars {
54544 /*
54545 diff -urNp linux-3.0.4/include/linux/elf.h linux-3.0.4/include/linux/elf.h
54546 --- linux-3.0.4/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
54547 +++ linux-3.0.4/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
54548 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
54549 #define PT_GNU_EH_FRAME 0x6474e550
54550
54551 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
54552 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
54553 +
54554 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
54555 +
54556 +/* Constants for the e_flags field */
54557 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54558 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
54559 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
54560 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
54561 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54562 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54563
54564 /*
54565 * Extended Numbering
54566 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
54567 #define DT_DEBUG 21
54568 #define DT_TEXTREL 22
54569 #define DT_JMPREL 23
54570 +#define DT_FLAGS 30
54571 + #define DF_TEXTREL 0x00000004
54572 #define DT_ENCODING 32
54573 #define OLD_DT_LOOS 0x60000000
54574 #define DT_LOOS 0x6000000d
54575 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
54576 #define PF_W 0x2
54577 #define PF_X 0x1
54578
54579 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
54580 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
54581 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
54582 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
54583 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
54584 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
54585 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
54586 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
54587 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
54588 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
54589 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
54590 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
54591 +
54592 typedef struct elf32_phdr{
54593 Elf32_Word p_type;
54594 Elf32_Off p_offset;
54595 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
54596 #define EI_OSABI 7
54597 #define EI_PAD 8
54598
54599 +#define EI_PAX 14
54600 +
54601 #define ELFMAG0 0x7f /* EI_MAG */
54602 #define ELFMAG1 'E'
54603 #define ELFMAG2 'L'
54604 @@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
54605 #define elf_note elf32_note
54606 #define elf_addr_t Elf32_Off
54607 #define Elf_Half Elf32_Half
54608 +#define elf_dyn Elf32_Dyn
54609
54610 #else
54611
54612 @@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
54613 #define elf_note elf64_note
54614 #define elf_addr_t Elf64_Off
54615 #define Elf_Half Elf64_Half
54616 +#define elf_dyn Elf64_Dyn
54617
54618 #endif
54619
54620 diff -urNp linux-3.0.4/include/linux/firewire.h linux-3.0.4/include/linux/firewire.h
54621 --- linux-3.0.4/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
54622 +++ linux-3.0.4/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
54623 @@ -428,7 +428,7 @@ struct fw_iso_context {
54624 union {
54625 fw_iso_callback_t sc;
54626 fw_iso_mc_callback_t mc;
54627 - } callback;
54628 + } __no_const callback;
54629 void *callback_data;
54630 };
54631
54632 diff -urNp linux-3.0.4/include/linux/fscache-cache.h linux-3.0.4/include/linux/fscache-cache.h
54633 --- linux-3.0.4/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
54634 +++ linux-3.0.4/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
54635 @@ -102,7 +102,7 @@ struct fscache_operation {
54636 fscache_operation_release_t release;
54637 };
54638
54639 -extern atomic_t fscache_op_debug_id;
54640 +extern atomic_unchecked_t fscache_op_debug_id;
54641 extern void fscache_op_work_func(struct work_struct *work);
54642
54643 extern void fscache_enqueue_operation(struct fscache_operation *);
54644 @@ -122,7 +122,7 @@ static inline void fscache_operation_ini
54645 {
54646 INIT_WORK(&op->work, fscache_op_work_func);
54647 atomic_set(&op->usage, 1);
54648 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
54649 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
54650 op->processor = processor;
54651 op->release = release;
54652 INIT_LIST_HEAD(&op->pend_link);
54653 diff -urNp linux-3.0.4/include/linux/fs.h linux-3.0.4/include/linux/fs.h
54654 --- linux-3.0.4/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
54655 +++ linux-3.0.4/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
54656 @@ -109,6 +109,11 @@ struct inodes_stat_t {
54657 /* File was opened by fanotify and shouldn't generate fanotify events */
54658 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
54659
54660 +/* Hack for grsec so as not to require read permission simply to execute
54661 + * a binary
54662 + */
54663 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
54664 +
54665 /*
54666 * The below are the various read and write types that we support. Some of
54667 * them include behavioral modifiers that send information down to the
54668 @@ -1571,7 +1576,8 @@ struct file_operations {
54669 int (*setlease)(struct file *, long, struct file_lock **);
54670 long (*fallocate)(struct file *file, int mode, loff_t offset,
54671 loff_t len);
54672 -};
54673 +} __do_const;
54674 +typedef struct file_operations __no_const file_operations_no_const;
54675
54676 #define IPERM_FLAG_RCU 0x0001
54677
54678 diff -urNp linux-3.0.4/include/linux/fsnotify.h linux-3.0.4/include/linux/fsnotify.h
54679 --- linux-3.0.4/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
54680 +++ linux-3.0.4/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
54681 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
54682 */
54683 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
54684 {
54685 - return kstrdup(name, GFP_KERNEL);
54686 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
54687 }
54688
54689 /*
54690 diff -urNp linux-3.0.4/include/linux/fs_struct.h linux-3.0.4/include/linux/fs_struct.h
54691 --- linux-3.0.4/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
54692 +++ linux-3.0.4/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
54693 @@ -6,7 +6,7 @@
54694 #include <linux/seqlock.h>
54695
54696 struct fs_struct {
54697 - int users;
54698 + atomic_t users;
54699 spinlock_t lock;
54700 seqcount_t seq;
54701 int umask;
54702 diff -urNp linux-3.0.4/include/linux/ftrace_event.h linux-3.0.4/include/linux/ftrace_event.h
54703 --- linux-3.0.4/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
54704 +++ linux-3.0.4/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
54705 @@ -96,7 +96,7 @@ struct trace_event_functions {
54706 trace_print_func raw;
54707 trace_print_func hex;
54708 trace_print_func binary;
54709 -};
54710 +} __no_const;
54711
54712 struct trace_event {
54713 struct hlist_node node;
54714 @@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
54715 extern int trace_add_event_call(struct ftrace_event_call *call);
54716 extern void trace_remove_event_call(struct ftrace_event_call *call);
54717
54718 -#define is_signed_type(type) (((type)(-1)) < 0)
54719 +#define is_signed_type(type) (((type)(-1)) < (type)1)
54720
54721 int trace_set_clr_event(const char *system, const char *event, int set);
54722
54723 diff -urNp linux-3.0.4/include/linux/genhd.h linux-3.0.4/include/linux/genhd.h
54724 --- linux-3.0.4/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
54725 +++ linux-3.0.4/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
54726 @@ -184,7 +184,7 @@ struct gendisk {
54727 struct kobject *slave_dir;
54728
54729 struct timer_rand_state *random;
54730 - atomic_t sync_io; /* RAID */
54731 + atomic_unchecked_t sync_io; /* RAID */
54732 struct disk_events *ev;
54733 #ifdef CONFIG_BLK_DEV_INTEGRITY
54734 struct blk_integrity *integrity;
54735 diff -urNp linux-3.0.4/include/linux/gracl.h linux-3.0.4/include/linux/gracl.h
54736 --- linux-3.0.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
54737 +++ linux-3.0.4/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
54738 @@ -0,0 +1,317 @@
54739 +#ifndef GR_ACL_H
54740 +#define GR_ACL_H
54741 +
54742 +#include <linux/grdefs.h>
54743 +#include <linux/resource.h>
54744 +#include <linux/capability.h>
54745 +#include <linux/dcache.h>
54746 +#include <asm/resource.h>
54747 +
54748 +/* Major status information */
54749 +
54750 +#define GR_VERSION "grsecurity 2.2.2"
54751 +#define GRSECURITY_VERSION 0x2202
54752 +
54753 +enum {
54754 + GR_SHUTDOWN = 0,
54755 + GR_ENABLE = 1,
54756 + GR_SPROLE = 2,
54757 + GR_RELOAD = 3,
54758 + GR_SEGVMOD = 4,
54759 + GR_STATUS = 5,
54760 + GR_UNSPROLE = 6,
54761 + GR_PASSSET = 7,
54762 + GR_SPROLEPAM = 8,
54763 +};
54764 +
54765 +/* Password setup definitions
54766 + * kernel/grhash.c */
54767 +enum {
54768 + GR_PW_LEN = 128,
54769 + GR_SALT_LEN = 16,
54770 + GR_SHA_LEN = 32,
54771 +};
54772 +
54773 +enum {
54774 + GR_SPROLE_LEN = 64,
54775 +};
54776 +
54777 +enum {
54778 + GR_NO_GLOB = 0,
54779 + GR_REG_GLOB,
54780 + GR_CREATE_GLOB
54781 +};
54782 +
54783 +#define GR_NLIMITS 32
54784 +
54785 +/* Begin Data Structures */
54786 +
54787 +struct sprole_pw {
54788 + unsigned char *rolename;
54789 + unsigned char salt[GR_SALT_LEN];
54790 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
54791 +};
54792 +
54793 +struct name_entry {
54794 + __u32 key;
54795 + ino_t inode;
54796 + dev_t device;
54797 + char *name;
54798 + __u16 len;
54799 + __u8 deleted;
54800 + struct name_entry *prev;
54801 + struct name_entry *next;
54802 +};
54803 +
54804 +struct inodev_entry {
54805 + struct name_entry *nentry;
54806 + struct inodev_entry *prev;
54807 + struct inodev_entry *next;
54808 +};
54809 +
54810 +struct acl_role_db {
54811 + struct acl_role_label **r_hash;
54812 + __u32 r_size;
54813 +};
54814 +
54815 +struct inodev_db {
54816 + struct inodev_entry **i_hash;
54817 + __u32 i_size;
54818 +};
54819 +
54820 +struct name_db {
54821 + struct name_entry **n_hash;
54822 + __u32 n_size;
54823 +};
54824 +
54825 +struct crash_uid {
54826 + uid_t uid;
54827 + unsigned long expires;
54828 +};
54829 +
54830 +struct gr_hash_struct {
54831 + void **table;
54832 + void **nametable;
54833 + void *first;
54834 + __u32 table_size;
54835 + __u32 used_size;
54836 + int type;
54837 +};
54838 +
54839 +/* Userspace Grsecurity ACL data structures */
54840 +
54841 +struct acl_subject_label {
54842 + char *filename;
54843 + ino_t inode;
54844 + dev_t device;
54845 + __u32 mode;
54846 + kernel_cap_t cap_mask;
54847 + kernel_cap_t cap_lower;
54848 + kernel_cap_t cap_invert_audit;
54849 +
54850 + struct rlimit res[GR_NLIMITS];
54851 + __u32 resmask;
54852 +
54853 + __u8 user_trans_type;
54854 + __u8 group_trans_type;
54855 + uid_t *user_transitions;
54856 + gid_t *group_transitions;
54857 + __u16 user_trans_num;
54858 + __u16 group_trans_num;
54859 +
54860 + __u32 sock_families[2];
54861 + __u32 ip_proto[8];
54862 + __u32 ip_type;
54863 + struct acl_ip_label **ips;
54864 + __u32 ip_num;
54865 + __u32 inaddr_any_override;
54866 +
54867 + __u32 crashes;
54868 + unsigned long expires;
54869 +
54870 + struct acl_subject_label *parent_subject;
54871 + struct gr_hash_struct *hash;
54872 + struct acl_subject_label *prev;
54873 + struct acl_subject_label *next;
54874 +
54875 + struct acl_object_label **obj_hash;
54876 + __u32 obj_hash_size;
54877 + __u16 pax_flags;
54878 +};
54879 +
54880 +struct role_allowed_ip {
54881 + __u32 addr;
54882 + __u32 netmask;
54883 +
54884 + struct role_allowed_ip *prev;
54885 + struct role_allowed_ip *next;
54886 +};
54887 +
54888 +struct role_transition {
54889 + char *rolename;
54890 +
54891 + struct role_transition *prev;
54892 + struct role_transition *next;
54893 +};
54894 +
54895 +struct acl_role_label {
54896 + char *rolename;
54897 + uid_t uidgid;
54898 + __u16 roletype;
54899 +
54900 + __u16 auth_attempts;
54901 + unsigned long expires;
54902 +
54903 + struct acl_subject_label *root_label;
54904 + struct gr_hash_struct *hash;
54905 +
54906 + struct acl_role_label *prev;
54907 + struct acl_role_label *next;
54908 +
54909 + struct role_transition *transitions;
54910 + struct role_allowed_ip *allowed_ips;
54911 + uid_t *domain_children;
54912 + __u16 domain_child_num;
54913 +
54914 + struct acl_subject_label **subj_hash;
54915 + __u32 subj_hash_size;
54916 +};
54917 +
54918 +struct user_acl_role_db {
54919 + struct acl_role_label **r_table;
54920 + __u32 num_pointers; /* Number of allocations to track */
54921 + __u32 num_roles; /* Number of roles */
54922 + __u32 num_domain_children; /* Number of domain children */
54923 + __u32 num_subjects; /* Number of subjects */
54924 + __u32 num_objects; /* Number of objects */
54925 +};
54926 +
54927 +struct acl_object_label {
54928 + char *filename;
54929 + ino_t inode;
54930 + dev_t device;
54931 + __u32 mode;
54932 +
54933 + struct acl_subject_label *nested;
54934 + struct acl_object_label *globbed;
54935 +
54936 + /* next two structures not used */
54937 +
54938 + struct acl_object_label *prev;
54939 + struct acl_object_label *next;
54940 +};
54941 +
54942 +struct acl_ip_label {
54943 + char *iface;
54944 + __u32 addr;
54945 + __u32 netmask;
54946 + __u16 low, high;
54947 + __u8 mode;
54948 + __u32 type;
54949 + __u32 proto[8];
54950 +
54951 + /* next two structures not used */
54952 +
54953 + struct acl_ip_label *prev;
54954 + struct acl_ip_label *next;
54955 +};
54956 +
54957 +struct gr_arg {
54958 + struct user_acl_role_db role_db;
54959 + unsigned char pw[GR_PW_LEN];
54960 + unsigned char salt[GR_SALT_LEN];
54961 + unsigned char sum[GR_SHA_LEN];
54962 + unsigned char sp_role[GR_SPROLE_LEN];
54963 + struct sprole_pw *sprole_pws;
54964 + dev_t segv_device;
54965 + ino_t segv_inode;
54966 + uid_t segv_uid;
54967 + __u16 num_sprole_pws;
54968 + __u16 mode;
54969 +};
54970 +
54971 +struct gr_arg_wrapper {
54972 + struct gr_arg *arg;
54973 + __u32 version;
54974 + __u32 size;
54975 +};
54976 +
54977 +struct subject_map {
54978 + struct acl_subject_label *user;
54979 + struct acl_subject_label *kernel;
54980 + struct subject_map *prev;
54981 + struct subject_map *next;
54982 +};
54983 +
54984 +struct acl_subj_map_db {
54985 + struct subject_map **s_hash;
54986 + __u32 s_size;
54987 +};
54988 +
54989 +/* End Data Structures Section */
54990 +
54991 +/* Hash functions generated by empirical testing by Brad Spengler
54992 + Makes good use of the low bits of the inode. Generally 0-1 times
54993 + in loop for successful match. 0-3 for unsuccessful match.
54994 + Shift/add algorithm with modulus of table size and an XOR*/
54995 +
54996 +static __inline__ unsigned int
54997 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
54998 +{
54999 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
55000 +}
55001 +
55002 + static __inline__ unsigned int
55003 +shash(const struct acl_subject_label *userp, const unsigned int sz)
55004 +{
55005 + return ((const unsigned long)userp % sz);
55006 +}
55007 +
55008 +static __inline__ unsigned int
55009 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55010 +{
55011 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55012 +}
55013 +
55014 +static __inline__ unsigned int
55015 +nhash(const char *name, const __u16 len, const unsigned int sz)
55016 +{
55017 + return full_name_hash((const unsigned char *)name, len) % sz;
55018 +}
55019 +
55020 +#define FOR_EACH_ROLE_START(role) \
55021 + role = role_list; \
55022 + while (role) {
55023 +
55024 +#define FOR_EACH_ROLE_END(role) \
55025 + role = role->prev; \
55026 + }
55027 +
55028 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55029 + subj = NULL; \
55030 + iter = 0; \
55031 + while (iter < role->subj_hash_size) { \
55032 + if (subj == NULL) \
55033 + subj = role->subj_hash[iter]; \
55034 + if (subj == NULL) { \
55035 + iter++; \
55036 + continue; \
55037 + }
55038 +
55039 +#define FOR_EACH_SUBJECT_END(subj,iter) \
55040 + subj = subj->next; \
55041 + if (subj == NULL) \
55042 + iter++; \
55043 + }
55044 +
55045 +
55046 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55047 + subj = role->hash->first; \
55048 + while (subj != NULL) {
55049 +
55050 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55051 + subj = subj->next; \
55052 + }
55053 +
55054 +#endif
55055 +
55056 diff -urNp linux-3.0.4/include/linux/gralloc.h linux-3.0.4/include/linux/gralloc.h
55057 --- linux-3.0.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55058 +++ linux-3.0.4/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
55059 @@ -0,0 +1,9 @@
55060 +#ifndef __GRALLOC_H
55061 +#define __GRALLOC_H
55062 +
55063 +void acl_free_all(void);
55064 +int acl_alloc_stack_init(unsigned long size);
55065 +void *acl_alloc(unsigned long len);
55066 +void *acl_alloc_num(unsigned long num, unsigned long len);
55067 +
55068 +#endif
55069 diff -urNp linux-3.0.4/include/linux/grdefs.h linux-3.0.4/include/linux/grdefs.h
55070 --- linux-3.0.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55071 +++ linux-3.0.4/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
55072 @@ -0,0 +1,140 @@
55073 +#ifndef GRDEFS_H
55074 +#define GRDEFS_H
55075 +
55076 +/* Begin grsecurity status declarations */
55077 +
55078 +enum {
55079 + GR_READY = 0x01,
55080 + GR_STATUS_INIT = 0x00 // disabled state
55081 +};
55082 +
55083 +/* Begin ACL declarations */
55084 +
55085 +/* Role flags */
55086 +
55087 +enum {
55088 + GR_ROLE_USER = 0x0001,
55089 + GR_ROLE_GROUP = 0x0002,
55090 + GR_ROLE_DEFAULT = 0x0004,
55091 + GR_ROLE_SPECIAL = 0x0008,
55092 + GR_ROLE_AUTH = 0x0010,
55093 + GR_ROLE_NOPW = 0x0020,
55094 + GR_ROLE_GOD = 0x0040,
55095 + GR_ROLE_LEARN = 0x0080,
55096 + GR_ROLE_TPE = 0x0100,
55097 + GR_ROLE_DOMAIN = 0x0200,
55098 + GR_ROLE_PAM = 0x0400,
55099 + GR_ROLE_PERSIST = 0x0800
55100 +};
55101 +
55102 +/* ACL Subject and Object mode flags */
55103 +enum {
55104 + GR_DELETED = 0x80000000
55105 +};
55106 +
55107 +/* ACL Object-only mode flags */
55108 +enum {
55109 + GR_READ = 0x00000001,
55110 + GR_APPEND = 0x00000002,
55111 + GR_WRITE = 0x00000004,
55112 + GR_EXEC = 0x00000008,
55113 + GR_FIND = 0x00000010,
55114 + GR_INHERIT = 0x00000020,
55115 + GR_SETID = 0x00000040,
55116 + GR_CREATE = 0x00000080,
55117 + GR_DELETE = 0x00000100,
55118 + GR_LINK = 0x00000200,
55119 + GR_AUDIT_READ = 0x00000400,
55120 + GR_AUDIT_APPEND = 0x00000800,
55121 + GR_AUDIT_WRITE = 0x00001000,
55122 + GR_AUDIT_EXEC = 0x00002000,
55123 + GR_AUDIT_FIND = 0x00004000,
55124 + GR_AUDIT_INHERIT= 0x00008000,
55125 + GR_AUDIT_SETID = 0x00010000,
55126 + GR_AUDIT_CREATE = 0x00020000,
55127 + GR_AUDIT_DELETE = 0x00040000,
55128 + GR_AUDIT_LINK = 0x00080000,
55129 + GR_PTRACERD = 0x00100000,
55130 + GR_NOPTRACE = 0x00200000,
55131 + GR_SUPPRESS = 0x00400000,
55132 + GR_NOLEARN = 0x00800000,
55133 + GR_INIT_TRANSFER= 0x01000000
55134 +};
55135 +
55136 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55137 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55138 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55139 +
55140 +/* ACL subject-only mode flags */
55141 +enum {
55142 + GR_KILL = 0x00000001,
55143 + GR_VIEW = 0x00000002,
55144 + GR_PROTECTED = 0x00000004,
55145 + GR_LEARN = 0x00000008,
55146 + GR_OVERRIDE = 0x00000010,
55147 + /* just a placeholder, this mode is only used in userspace */
55148 + GR_DUMMY = 0x00000020,
55149 + GR_PROTSHM = 0x00000040,
55150 + GR_KILLPROC = 0x00000080,
55151 + GR_KILLIPPROC = 0x00000100,
55152 + /* just a placeholder, this mode is only used in userspace */
55153 + GR_NOTROJAN = 0x00000200,
55154 + GR_PROTPROCFD = 0x00000400,
55155 + GR_PROCACCT = 0x00000800,
55156 + GR_RELAXPTRACE = 0x00001000,
55157 + GR_NESTED = 0x00002000,
55158 + GR_INHERITLEARN = 0x00004000,
55159 + GR_PROCFIND = 0x00008000,
55160 + GR_POVERRIDE = 0x00010000,
55161 + GR_KERNELAUTH = 0x00020000,
55162 + GR_ATSECURE = 0x00040000,
55163 + GR_SHMEXEC = 0x00080000
55164 +};
55165 +
55166 +enum {
55167 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
55168 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
55169 + GR_PAX_ENABLE_MPROTECT = 0x0004,
55170 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
55171 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
55172 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55173 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55174 + GR_PAX_DISABLE_MPROTECT = 0x0400,
55175 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
55176 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55177 +};
55178 +
55179 +enum {
55180 + GR_ID_USER = 0x01,
55181 + GR_ID_GROUP = 0x02,
55182 +};
55183 +
55184 +enum {
55185 + GR_ID_ALLOW = 0x01,
55186 + GR_ID_DENY = 0x02,
55187 +};
55188 +
55189 +#define GR_CRASH_RES 31
55190 +#define GR_UIDTABLE_MAX 500
55191 +
55192 +/* begin resource learning section */
55193 +enum {
55194 + GR_RLIM_CPU_BUMP = 60,
55195 + GR_RLIM_FSIZE_BUMP = 50000,
55196 + GR_RLIM_DATA_BUMP = 10000,
55197 + GR_RLIM_STACK_BUMP = 1000,
55198 + GR_RLIM_CORE_BUMP = 10000,
55199 + GR_RLIM_RSS_BUMP = 500000,
55200 + GR_RLIM_NPROC_BUMP = 1,
55201 + GR_RLIM_NOFILE_BUMP = 5,
55202 + GR_RLIM_MEMLOCK_BUMP = 50000,
55203 + GR_RLIM_AS_BUMP = 500000,
55204 + GR_RLIM_LOCKS_BUMP = 2,
55205 + GR_RLIM_SIGPENDING_BUMP = 5,
55206 + GR_RLIM_MSGQUEUE_BUMP = 10000,
55207 + GR_RLIM_NICE_BUMP = 1,
55208 + GR_RLIM_RTPRIO_BUMP = 1,
55209 + GR_RLIM_RTTIME_BUMP = 1000000
55210 +};
55211 +
55212 +#endif
55213 diff -urNp linux-3.0.4/include/linux/grinternal.h linux-3.0.4/include/linux/grinternal.h
55214 --- linux-3.0.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
55215 +++ linux-3.0.4/include/linux/grinternal.h 2011-08-23 21:48:14.000000000 -0400
55216 @@ -0,0 +1,219 @@
55217 +#ifndef __GRINTERNAL_H
55218 +#define __GRINTERNAL_H
55219 +
55220 +#ifdef CONFIG_GRKERNSEC
55221 +
55222 +#include <linux/fs.h>
55223 +#include <linux/mnt_namespace.h>
55224 +#include <linux/nsproxy.h>
55225 +#include <linux/gracl.h>
55226 +#include <linux/grdefs.h>
55227 +#include <linux/grmsg.h>
55228 +
55229 +void gr_add_learn_entry(const char *fmt, ...)
55230 + __attribute__ ((format (printf, 1, 2)));
55231 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55232 + const struct vfsmount *mnt);
55233 +__u32 gr_check_create(const struct dentry *new_dentry,
55234 + const struct dentry *parent,
55235 + const struct vfsmount *mnt, const __u32 mode);
55236 +int gr_check_protected_task(const struct task_struct *task);
55237 +__u32 to_gr_audit(const __u32 reqmode);
55238 +int gr_set_acls(const int type);
55239 +int gr_apply_subject_to_task(struct task_struct *task);
55240 +int gr_acl_is_enabled(void);
55241 +char gr_roletype_to_char(void);
55242 +
55243 +void gr_handle_alertkill(struct task_struct *task);
55244 +char *gr_to_filename(const struct dentry *dentry,
55245 + const struct vfsmount *mnt);
55246 +char *gr_to_filename1(const struct dentry *dentry,
55247 + const struct vfsmount *mnt);
55248 +char *gr_to_filename2(const struct dentry *dentry,
55249 + const struct vfsmount *mnt);
55250 +char *gr_to_filename3(const struct dentry *dentry,
55251 + const struct vfsmount *mnt);
55252 +
55253 +extern int grsec_enable_harden_ptrace;
55254 +extern int grsec_enable_link;
55255 +extern int grsec_enable_fifo;
55256 +extern int grsec_enable_execve;
55257 +extern int grsec_enable_shm;
55258 +extern int grsec_enable_execlog;
55259 +extern int grsec_enable_signal;
55260 +extern int grsec_enable_audit_ptrace;
55261 +extern int grsec_enable_forkfail;
55262 +extern int grsec_enable_time;
55263 +extern int grsec_enable_rofs;
55264 +extern int grsec_enable_chroot_shmat;
55265 +extern int grsec_enable_chroot_mount;
55266 +extern int grsec_enable_chroot_double;
55267 +extern int grsec_enable_chroot_pivot;
55268 +extern int grsec_enable_chroot_chdir;
55269 +extern int grsec_enable_chroot_chmod;
55270 +extern int grsec_enable_chroot_mknod;
55271 +extern int grsec_enable_chroot_fchdir;
55272 +extern int grsec_enable_chroot_nice;
55273 +extern int grsec_enable_chroot_execlog;
55274 +extern int grsec_enable_chroot_caps;
55275 +extern int grsec_enable_chroot_sysctl;
55276 +extern int grsec_enable_chroot_unix;
55277 +extern int grsec_enable_tpe;
55278 +extern int grsec_tpe_gid;
55279 +extern int grsec_enable_tpe_all;
55280 +extern int grsec_enable_tpe_invert;
55281 +extern int grsec_enable_socket_all;
55282 +extern int grsec_socket_all_gid;
55283 +extern int grsec_enable_socket_client;
55284 +extern int grsec_socket_client_gid;
55285 +extern int grsec_enable_socket_server;
55286 +extern int grsec_socket_server_gid;
55287 +extern int grsec_audit_gid;
55288 +extern int grsec_enable_group;
55289 +extern int grsec_enable_audit_textrel;
55290 +extern int grsec_enable_log_rwxmaps;
55291 +extern int grsec_enable_mount;
55292 +extern int grsec_enable_chdir;
55293 +extern int grsec_resource_logging;
55294 +extern int grsec_enable_blackhole;
55295 +extern int grsec_lastack_retries;
55296 +extern int grsec_enable_brute;
55297 +extern int grsec_lock;
55298 +
55299 +extern spinlock_t grsec_alert_lock;
55300 +extern unsigned long grsec_alert_wtime;
55301 +extern unsigned long grsec_alert_fyet;
55302 +
55303 +extern spinlock_t grsec_audit_lock;
55304 +
55305 +extern rwlock_t grsec_exec_file_lock;
55306 +
55307 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55308 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55309 + (tsk)->exec_file->f_vfsmnt) : "/")
55310 +
55311 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55312 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55313 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55314 +
55315 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55316 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
55317 + (tsk)->exec_file->f_vfsmnt) : "/")
55318 +
55319 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55320 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55321 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55322 +
55323 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
55324 +
55325 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
55326 +
55327 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
55328 + (task)->pid, (cred)->uid, \
55329 + (cred)->euid, (cred)->gid, (cred)->egid, \
55330 + gr_parent_task_fullpath(task), \
55331 + (task)->real_parent->comm, (task)->real_parent->pid, \
55332 + (pcred)->uid, (pcred)->euid, \
55333 + (pcred)->gid, (pcred)->egid
55334 +
55335 +#define GR_CHROOT_CAPS {{ \
55336 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
55337 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
55338 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
55339 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
55340 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
55341 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
55342 +
55343 +#define security_learn(normal_msg,args...) \
55344 +({ \
55345 + read_lock(&grsec_exec_file_lock); \
55346 + gr_add_learn_entry(normal_msg "\n", ## args); \
55347 + read_unlock(&grsec_exec_file_lock); \
55348 +})
55349 +
55350 +enum {
55351 + GR_DO_AUDIT,
55352 + GR_DONT_AUDIT,
55353 + /* used for non-audit messages that we shouldn't kill the task on */
55354 + GR_DONT_AUDIT_GOOD
55355 +};
55356 +
55357 +enum {
55358 + GR_TTYSNIFF,
55359 + GR_RBAC,
55360 + GR_RBAC_STR,
55361 + GR_STR_RBAC,
55362 + GR_RBAC_MODE2,
55363 + GR_RBAC_MODE3,
55364 + GR_FILENAME,
55365 + GR_SYSCTL_HIDDEN,
55366 + GR_NOARGS,
55367 + GR_ONE_INT,
55368 + GR_ONE_INT_TWO_STR,
55369 + GR_ONE_STR,
55370 + GR_STR_INT,
55371 + GR_TWO_STR_INT,
55372 + GR_TWO_INT,
55373 + GR_TWO_U64,
55374 + GR_THREE_INT,
55375 + GR_FIVE_INT_TWO_STR,
55376 + GR_TWO_STR,
55377 + GR_THREE_STR,
55378 + GR_FOUR_STR,
55379 + GR_STR_FILENAME,
55380 + GR_FILENAME_STR,
55381 + GR_FILENAME_TWO_INT,
55382 + GR_FILENAME_TWO_INT_STR,
55383 + GR_TEXTREL,
55384 + GR_PTRACE,
55385 + GR_RESOURCE,
55386 + GR_CAP,
55387 + GR_SIG,
55388 + GR_SIG2,
55389 + GR_CRASH1,
55390 + GR_CRASH2,
55391 + GR_PSACCT,
55392 + GR_RWXMAP
55393 +};
55394 +
55395 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
55396 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
55397 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
55398 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
55399 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
55400 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
55401 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
55402 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
55403 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
55404 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
55405 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
55406 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
55407 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
55408 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
55409 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
55410 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
55411 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
55412 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
55413 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
55414 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
55415 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
55416 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
55417 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
55418 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
55419 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
55420 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
55421 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
55422 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
55423 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
55424 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
55425 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
55426 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
55427 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
55428 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
55429 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
55430 +
55431 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
55432 +
55433 +#endif
55434 +
55435 +#endif
55436 diff -urNp linux-3.0.4/include/linux/grmsg.h linux-3.0.4/include/linux/grmsg.h
55437 --- linux-3.0.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
55438 +++ linux-3.0.4/include/linux/grmsg.h 2011-08-25 17:27:26.000000000 -0400
55439 @@ -0,0 +1,107 @@
55440 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
55441 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
55442 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
55443 +#define GR_STOPMOD_MSG "denied modification of module state by "
55444 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
55445 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
55446 +#define GR_IOPERM_MSG "denied use of ioperm() by "
55447 +#define GR_IOPL_MSG "denied use of iopl() by "
55448 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
55449 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
55450 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
55451 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
55452 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
55453 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
55454 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
55455 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
55456 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
55457 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
55458 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
55459 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
55460 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
55461 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
55462 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
55463 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
55464 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
55465 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
55466 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
55467 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
55468 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
55469 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
55470 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
55471 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
55472 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
55473 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
55474 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
55475 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
55476 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
55477 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
55478 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
55479 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
55480 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
55481 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
55482 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
55483 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
55484 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
55485 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
55486 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
55487 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
55488 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
55489 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
55490 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
55491 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
55492 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
55493 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
55494 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
55495 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
55496 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
55497 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
55498 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
55499 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
55500 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
55501 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
55502 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
55503 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
55504 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
55505 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
55506 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
55507 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
55508 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
55509 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
55510 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
55511 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
55512 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
55513 +#define GR_NICE_CHROOT_MSG "denied priority change by "
55514 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
55515 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
55516 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
55517 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
55518 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
55519 +#define GR_TIME_MSG "time set by "
55520 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
55521 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
55522 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
55523 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
55524 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
55525 +#define GR_BIND_MSG "denied bind() by "
55526 +#define GR_CONNECT_MSG "denied connect() by "
55527 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
55528 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
55529 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
55530 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
55531 +#define GR_CAP_ACL_MSG "use of %s denied for "
55532 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
55533 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
55534 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
55535 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
55536 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
55537 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
55538 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
55539 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
55540 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
55541 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
55542 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
55543 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
55544 +#define GR_VM86_MSG "denied use of vm86 by "
55545 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
55546 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
55547 diff -urNp linux-3.0.4/include/linux/grsecurity.h linux-3.0.4/include/linux/grsecurity.h
55548 --- linux-3.0.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
55549 +++ linux-3.0.4/include/linux/grsecurity.h 2011-08-25 17:27:36.000000000 -0400
55550 @@ -0,0 +1,227 @@
55551 +#ifndef GR_SECURITY_H
55552 +#define GR_SECURITY_H
55553 +#include <linux/fs.h>
55554 +#include <linux/fs_struct.h>
55555 +#include <linux/binfmts.h>
55556 +#include <linux/gracl.h>
55557 +
55558 +/* notify of brain-dead configs */
55559 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55560 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
55561 +#endif
55562 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
55563 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
55564 +#endif
55565 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55566 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55567 +#endif
55568 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55569 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55570 +#endif
55571 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
55572 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
55573 +#endif
55574 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
55575 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
55576 +#endif
55577 +
55578 +#include <linux/compat.h>
55579 +
55580 +struct user_arg_ptr {
55581 +#ifdef CONFIG_COMPAT
55582 + bool is_compat;
55583 +#endif
55584 + union {
55585 + const char __user *const __user *native;
55586 +#ifdef CONFIG_COMPAT
55587 + compat_uptr_t __user *compat;
55588 +#endif
55589 + } ptr;
55590 +};
55591 +
55592 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
55593 +void gr_handle_brute_check(void);
55594 +void gr_handle_kernel_exploit(void);
55595 +int gr_process_user_ban(void);
55596 +
55597 +char gr_roletype_to_char(void);
55598 +
55599 +int gr_acl_enable_at_secure(void);
55600 +
55601 +int gr_check_user_change(int real, int effective, int fs);
55602 +int gr_check_group_change(int real, int effective, int fs);
55603 +
55604 +void gr_del_task_from_ip_table(struct task_struct *p);
55605 +
55606 +int gr_pid_is_chrooted(struct task_struct *p);
55607 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
55608 +int gr_handle_chroot_nice(void);
55609 +int gr_handle_chroot_sysctl(const int op);
55610 +int gr_handle_chroot_setpriority(struct task_struct *p,
55611 + const int niceval);
55612 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
55613 +int gr_handle_chroot_chroot(const struct dentry *dentry,
55614 + const struct vfsmount *mnt);
55615 +int gr_handle_chroot_caps(struct path *path);
55616 +void gr_handle_chroot_chdir(struct path *path);
55617 +int gr_handle_chroot_chmod(const struct dentry *dentry,
55618 + const struct vfsmount *mnt, const int mode);
55619 +int gr_handle_chroot_mknod(const struct dentry *dentry,
55620 + const struct vfsmount *mnt, const int mode);
55621 +int gr_handle_chroot_mount(const struct dentry *dentry,
55622 + const struct vfsmount *mnt,
55623 + const char *dev_name);
55624 +int gr_handle_chroot_pivot(void);
55625 +int gr_handle_chroot_unix(const pid_t pid);
55626 +
55627 +int gr_handle_rawio(const struct inode *inode);
55628 +
55629 +void gr_handle_ioperm(void);
55630 +void gr_handle_iopl(void);
55631 +
55632 +int gr_tpe_allow(const struct file *file);
55633 +
55634 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
55635 +void gr_clear_chroot_entries(struct task_struct *task);
55636 +
55637 +void gr_log_forkfail(const int retval);
55638 +void gr_log_timechange(void);
55639 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
55640 +void gr_log_chdir(const struct dentry *dentry,
55641 + const struct vfsmount *mnt);
55642 +void gr_log_chroot_exec(const struct dentry *dentry,
55643 + const struct vfsmount *mnt);
55644 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
55645 +void gr_log_remount(const char *devname, const int retval);
55646 +void gr_log_unmount(const char *devname, const int retval);
55647 +void gr_log_mount(const char *from, const char *to, const int retval);
55648 +void gr_log_textrel(struct vm_area_struct *vma);
55649 +void gr_log_rwxmmap(struct file *file);
55650 +void gr_log_rwxmprotect(struct file *file);
55651 +
55652 +int gr_handle_follow_link(const struct inode *parent,
55653 + const struct inode *inode,
55654 + const struct dentry *dentry,
55655 + const struct vfsmount *mnt);
55656 +int gr_handle_fifo(const struct dentry *dentry,
55657 + const struct vfsmount *mnt,
55658 + const struct dentry *dir, const int flag,
55659 + const int acc_mode);
55660 +int gr_handle_hardlink(const struct dentry *dentry,
55661 + const struct vfsmount *mnt,
55662 + struct inode *inode,
55663 + const int mode, const char *to);
55664 +
55665 +int gr_is_capable(const int cap);
55666 +int gr_is_capable_nolog(const int cap);
55667 +void gr_learn_resource(const struct task_struct *task, const int limit,
55668 + const unsigned long wanted, const int gt);
55669 +void gr_copy_label(struct task_struct *tsk);
55670 +void gr_handle_crash(struct task_struct *task, const int sig);
55671 +int gr_handle_signal(const struct task_struct *p, const int sig);
55672 +int gr_check_crash_uid(const uid_t uid);
55673 +int gr_check_protected_task(const struct task_struct *task);
55674 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
55675 +int gr_acl_handle_mmap(const struct file *file,
55676 + const unsigned long prot);
55677 +int gr_acl_handle_mprotect(const struct file *file,
55678 + const unsigned long prot);
55679 +int gr_check_hidden_task(const struct task_struct *tsk);
55680 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
55681 + const struct vfsmount *mnt);
55682 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
55683 + const struct vfsmount *mnt);
55684 +__u32 gr_acl_handle_access(const struct dentry *dentry,
55685 + const struct vfsmount *mnt, const int fmode);
55686 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
55687 + const struct vfsmount *mnt, mode_t mode);
55688 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
55689 + const struct vfsmount *mnt, mode_t mode);
55690 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
55691 + const struct vfsmount *mnt);
55692 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
55693 + const struct vfsmount *mnt);
55694 +int gr_handle_ptrace(struct task_struct *task, const long request);
55695 +int gr_handle_proc_ptrace(struct task_struct *task);
55696 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
55697 + const struct vfsmount *mnt);
55698 +int gr_check_crash_exec(const struct file *filp);
55699 +int gr_acl_is_enabled(void);
55700 +void gr_set_kernel_label(struct task_struct *task);
55701 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
55702 + const gid_t gid);
55703 +int gr_set_proc_label(const struct dentry *dentry,
55704 + const struct vfsmount *mnt,
55705 + const int unsafe_share);
55706 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
55707 + const struct vfsmount *mnt);
55708 +__u32 gr_acl_handle_open(const struct dentry *dentry,
55709 + const struct vfsmount *mnt, const int fmode);
55710 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
55711 + const struct dentry *p_dentry,
55712 + const struct vfsmount *p_mnt, const int fmode,
55713 + const int imode);
55714 +void gr_handle_create(const struct dentry *dentry,
55715 + const struct vfsmount *mnt);
55716 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
55717 + const struct dentry *parent_dentry,
55718 + const struct vfsmount *parent_mnt,
55719 + const int mode);
55720 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
55721 + const struct dentry *parent_dentry,
55722 + const struct vfsmount *parent_mnt);
55723 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
55724 + const struct vfsmount *mnt);
55725 +void gr_handle_delete(const ino_t ino, const dev_t dev);
55726 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
55727 + const struct vfsmount *mnt);
55728 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
55729 + const struct dentry *parent_dentry,
55730 + const struct vfsmount *parent_mnt,
55731 + const char *from);
55732 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
55733 + const struct dentry *parent_dentry,
55734 + const struct vfsmount *parent_mnt,
55735 + const struct dentry *old_dentry,
55736 + const struct vfsmount *old_mnt, const char *to);
55737 +int gr_acl_handle_rename(struct dentry *new_dentry,
55738 + struct dentry *parent_dentry,
55739 + const struct vfsmount *parent_mnt,
55740 + struct dentry *old_dentry,
55741 + struct inode *old_parent_inode,
55742 + struct vfsmount *old_mnt, const char *newname);
55743 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
55744 + struct dentry *old_dentry,
55745 + struct dentry *new_dentry,
55746 + struct vfsmount *mnt, const __u8 replace);
55747 +__u32 gr_check_link(const struct dentry *new_dentry,
55748 + const struct dentry *parent_dentry,
55749 + const struct vfsmount *parent_mnt,
55750 + const struct dentry *old_dentry,
55751 + const struct vfsmount *old_mnt);
55752 +int gr_acl_handle_filldir(const struct file *file, const char *name,
55753 + const unsigned int namelen, const ino_t ino);
55754 +
55755 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
55756 + const struct vfsmount *mnt);
55757 +void gr_acl_handle_exit(void);
55758 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
55759 +int gr_acl_handle_procpidmem(const struct task_struct *task);
55760 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
55761 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
55762 +void gr_audit_ptrace(struct task_struct *task);
55763 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
55764 +
55765 +#ifdef CONFIG_GRKERNSEC
55766 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
55767 +void gr_handle_vm86(void);
55768 +void gr_handle_mem_readwrite(u64 from, u64 to);
55769 +
55770 +extern int grsec_enable_dmesg;
55771 +extern int grsec_disable_privio;
55772 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55773 +extern int grsec_enable_chroot_findtask;
55774 +#endif
55775 +#endif
55776 +
55777 +#endif
55778 diff -urNp linux-3.0.4/include/linux/grsock.h linux-3.0.4/include/linux/grsock.h
55779 --- linux-3.0.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
55780 +++ linux-3.0.4/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
55781 @@ -0,0 +1,19 @@
55782 +#ifndef __GRSOCK_H
55783 +#define __GRSOCK_H
55784 +
55785 +extern void gr_attach_curr_ip(const struct sock *sk);
55786 +extern int gr_handle_sock_all(const int family, const int type,
55787 + const int protocol);
55788 +extern int gr_handle_sock_server(const struct sockaddr *sck);
55789 +extern int gr_handle_sock_server_other(const struct sock *sck);
55790 +extern int gr_handle_sock_client(const struct sockaddr *sck);
55791 +extern int gr_search_connect(struct socket * sock,
55792 + struct sockaddr_in * addr);
55793 +extern int gr_search_bind(struct socket * sock,
55794 + struct sockaddr_in * addr);
55795 +extern int gr_search_listen(struct socket * sock);
55796 +extern int gr_search_accept(struct socket * sock);
55797 +extern int gr_search_socket(const int domain, const int type,
55798 + const int protocol);
55799 +
55800 +#endif
55801 diff -urNp linux-3.0.4/include/linux/hid.h linux-3.0.4/include/linux/hid.h
55802 --- linux-3.0.4/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
55803 +++ linux-3.0.4/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
55804 @@ -675,7 +675,7 @@ struct hid_ll_driver {
55805 unsigned int code, int value);
55806
55807 int (*parse)(struct hid_device *hdev);
55808 -};
55809 +} __no_const;
55810
55811 #define PM_HINT_FULLON 1<<5
55812 #define PM_HINT_NORMAL 1<<1
55813 diff -urNp linux-3.0.4/include/linux/highmem.h linux-3.0.4/include/linux/highmem.h
55814 --- linux-3.0.4/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
55815 +++ linux-3.0.4/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
55816 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
55817 kunmap_atomic(kaddr, KM_USER0);
55818 }
55819
55820 +static inline void sanitize_highpage(struct page *page)
55821 +{
55822 + void *kaddr;
55823 + unsigned long flags;
55824 +
55825 + local_irq_save(flags);
55826 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
55827 + clear_page(kaddr);
55828 + kunmap_atomic(kaddr, KM_CLEARPAGE);
55829 + local_irq_restore(flags);
55830 +}
55831 +
55832 static inline void zero_user_segments(struct page *page,
55833 unsigned start1, unsigned end1,
55834 unsigned start2, unsigned end2)
55835 diff -urNp linux-3.0.4/include/linux/i2c.h linux-3.0.4/include/linux/i2c.h
55836 --- linux-3.0.4/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
55837 +++ linux-3.0.4/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
55838 @@ -346,6 +346,7 @@ struct i2c_algorithm {
55839 /* To determine what the adapter supports */
55840 u32 (*functionality) (struct i2c_adapter *);
55841 };
55842 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
55843
55844 /*
55845 * i2c_adapter is the structure used to identify a physical i2c bus along
55846 diff -urNp linux-3.0.4/include/linux/i2o.h linux-3.0.4/include/linux/i2o.h
55847 --- linux-3.0.4/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
55848 +++ linux-3.0.4/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
55849 @@ -564,7 +564,7 @@ struct i2o_controller {
55850 struct i2o_device *exec; /* Executive */
55851 #if BITS_PER_LONG == 64
55852 spinlock_t context_list_lock; /* lock for context_list */
55853 - atomic_t context_list_counter; /* needed for unique contexts */
55854 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
55855 struct list_head context_list; /* list of context id's
55856 and pointers */
55857 #endif
55858 diff -urNp linux-3.0.4/include/linux/init.h linux-3.0.4/include/linux/init.h
55859 --- linux-3.0.4/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
55860 +++ linux-3.0.4/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
55861 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
55862
55863 /* Each module must use one module_init(). */
55864 #define module_init(initfn) \
55865 - static inline initcall_t __inittest(void) \
55866 + static inline __used initcall_t __inittest(void) \
55867 { return initfn; } \
55868 int init_module(void) __attribute__((alias(#initfn)));
55869
55870 /* This is only required if you want to be unloadable. */
55871 #define module_exit(exitfn) \
55872 - static inline exitcall_t __exittest(void) \
55873 + static inline __used exitcall_t __exittest(void) \
55874 { return exitfn; } \
55875 void cleanup_module(void) __attribute__((alias(#exitfn)));
55876
55877 diff -urNp linux-3.0.4/include/linux/init_task.h linux-3.0.4/include/linux/init_task.h
55878 --- linux-3.0.4/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
55879 +++ linux-3.0.4/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
55880 @@ -126,6 +126,12 @@ extern struct cred init_cred;
55881 # define INIT_PERF_EVENTS(tsk)
55882 #endif
55883
55884 +#ifdef CONFIG_X86
55885 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
55886 +#else
55887 +#define INIT_TASK_THREAD_INFO
55888 +#endif
55889 +
55890 /*
55891 * INIT_TASK is used to set up the first task table, touch at
55892 * your own risk!. Base=0, limit=0x1fffff (=2MB)
55893 @@ -164,6 +170,7 @@ extern struct cred init_cred;
55894 RCU_INIT_POINTER(.cred, &init_cred), \
55895 .comm = "swapper", \
55896 .thread = INIT_THREAD, \
55897 + INIT_TASK_THREAD_INFO \
55898 .fs = &init_fs, \
55899 .files = &init_files, \
55900 .signal = &init_signals, \
55901 diff -urNp linux-3.0.4/include/linux/intel-iommu.h linux-3.0.4/include/linux/intel-iommu.h
55902 --- linux-3.0.4/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
55903 +++ linux-3.0.4/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
55904 @@ -296,7 +296,7 @@ struct iommu_flush {
55905 u8 fm, u64 type);
55906 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
55907 unsigned int size_order, u64 type);
55908 -};
55909 +} __no_const;
55910
55911 enum {
55912 SR_DMAR_FECTL_REG,
55913 diff -urNp linux-3.0.4/include/linux/interrupt.h linux-3.0.4/include/linux/interrupt.h
55914 --- linux-3.0.4/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
55915 +++ linux-3.0.4/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
55916 @@ -422,7 +422,7 @@ enum
55917 /* map softirq index to softirq name. update 'softirq_to_name' in
55918 * kernel/softirq.c when adding a new softirq.
55919 */
55920 -extern char *softirq_to_name[NR_SOFTIRQS];
55921 +extern const char * const softirq_to_name[NR_SOFTIRQS];
55922
55923 /* softirq mask and active fields moved to irq_cpustat_t in
55924 * asm/hardirq.h to get better cache usage. KAO
55925 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
55926
55927 struct softirq_action
55928 {
55929 - void (*action)(struct softirq_action *);
55930 + void (*action)(void);
55931 };
55932
55933 asmlinkage void do_softirq(void);
55934 asmlinkage void __do_softirq(void);
55935 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
55936 +extern void open_softirq(int nr, void (*action)(void));
55937 extern void softirq_init(void);
55938 static inline void __raise_softirq_irqoff(unsigned int nr)
55939 {
55940 diff -urNp linux-3.0.4/include/linux/kallsyms.h linux-3.0.4/include/linux/kallsyms.h
55941 --- linux-3.0.4/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
55942 +++ linux-3.0.4/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
55943 @@ -15,7 +15,8 @@
55944
55945 struct module;
55946
55947 -#ifdef CONFIG_KALLSYMS
55948 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
55949 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55950 /* Lookup the address for a symbol. Returns 0 if not found. */
55951 unsigned long kallsyms_lookup_name(const char *name);
55952
55953 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
55954 /* Stupid that this does nothing, but I didn't create this mess. */
55955 #define __print_symbol(fmt, addr)
55956 #endif /*CONFIG_KALLSYMS*/
55957 +#else /* when included by kallsyms.c, vsnprintf.c, or
55958 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
55959 +extern void __print_symbol(const char *fmt, unsigned long address);
55960 +extern int sprint_backtrace(char *buffer, unsigned long address);
55961 +extern int sprint_symbol(char *buffer, unsigned long address);
55962 +const char *kallsyms_lookup(unsigned long addr,
55963 + unsigned long *symbolsize,
55964 + unsigned long *offset,
55965 + char **modname, char *namebuf);
55966 +#endif
55967
55968 /* This macro allows us to keep printk typechecking */
55969 static void __check_printsym_format(const char *fmt, ...)
55970 diff -urNp linux-3.0.4/include/linux/kgdb.h linux-3.0.4/include/linux/kgdb.h
55971 --- linux-3.0.4/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
55972 +++ linux-3.0.4/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
55973 @@ -53,7 +53,7 @@ extern int kgdb_connected;
55974 extern int kgdb_io_module_registered;
55975
55976 extern atomic_t kgdb_setting_breakpoint;
55977 -extern atomic_t kgdb_cpu_doing_single_step;
55978 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
55979
55980 extern struct task_struct *kgdb_usethread;
55981 extern struct task_struct *kgdb_contthread;
55982 @@ -251,7 +251,7 @@ struct kgdb_arch {
55983 void (*disable_hw_break)(struct pt_regs *regs);
55984 void (*remove_all_hw_break)(void);
55985 void (*correct_hw_break)(void);
55986 -};
55987 +} __do_const;
55988
55989 /**
55990 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
55991 @@ -276,7 +276,7 @@ struct kgdb_io {
55992 void (*pre_exception) (void);
55993 void (*post_exception) (void);
55994 int is_console;
55995 -};
55996 +} __do_const;
55997
55998 extern struct kgdb_arch arch_kgdb_ops;
55999
56000 diff -urNp linux-3.0.4/include/linux/kmod.h linux-3.0.4/include/linux/kmod.h
56001 --- linux-3.0.4/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
56002 +++ linux-3.0.4/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
56003 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
56004 * usually useless though. */
56005 extern int __request_module(bool wait, const char *name, ...) \
56006 __attribute__((format(printf, 2, 3)));
56007 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56008 + __attribute__((format(printf, 3, 4)));
56009 #define request_module(mod...) __request_module(true, mod)
56010 #define request_module_nowait(mod...) __request_module(false, mod)
56011 #define try_then_request_module(x, mod...) \
56012 diff -urNp linux-3.0.4/include/linux/kvm_host.h linux-3.0.4/include/linux/kvm_host.h
56013 --- linux-3.0.4/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
56014 +++ linux-3.0.4/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
56015 @@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56016 void vcpu_load(struct kvm_vcpu *vcpu);
56017 void vcpu_put(struct kvm_vcpu *vcpu);
56018
56019 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56020 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56021 struct module *module);
56022 void kvm_exit(void);
56023
56024 @@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56025 struct kvm_guest_debug *dbg);
56026 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56027
56028 -int kvm_arch_init(void *opaque);
56029 +int kvm_arch_init(const void *opaque);
56030 void kvm_arch_exit(void);
56031
56032 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56033 diff -urNp linux-3.0.4/include/linux/libata.h linux-3.0.4/include/linux/libata.h
56034 --- linux-3.0.4/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
56035 +++ linux-3.0.4/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
56036 @@ -899,7 +899,7 @@ struct ata_port_operations {
56037 * fields must be pointers.
56038 */
56039 const struct ata_port_operations *inherits;
56040 -};
56041 +} __do_const;
56042
56043 struct ata_port_info {
56044 unsigned long flags;
56045 diff -urNp linux-3.0.4/include/linux/mca.h linux-3.0.4/include/linux/mca.h
56046 --- linux-3.0.4/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
56047 +++ linux-3.0.4/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
56048 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
56049 int region);
56050 void * (*mca_transform_memory)(struct mca_device *,
56051 void *memory);
56052 -};
56053 +} __no_const;
56054
56055 struct mca_bus {
56056 u64 default_dma_mask;
56057 diff -urNp linux-3.0.4/include/linux/memory.h linux-3.0.4/include/linux/memory.h
56058 --- linux-3.0.4/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
56059 +++ linux-3.0.4/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
56060 @@ -144,7 +144,7 @@ struct memory_accessor {
56061 size_t count);
56062 ssize_t (*write)(struct memory_accessor *, const char *buf,
56063 off_t offset, size_t count);
56064 -};
56065 +} __no_const;
56066
56067 /*
56068 * Kernel text modification mutex, used for code patching. Users of this lock
56069 diff -urNp linux-3.0.4/include/linux/mfd/abx500.h linux-3.0.4/include/linux/mfd/abx500.h
56070 --- linux-3.0.4/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
56071 +++ linux-3.0.4/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
56072 @@ -234,6 +234,7 @@ struct abx500_ops {
56073 int (*event_registers_startup_state_get) (struct device *, u8 *);
56074 int (*startup_irq_enabled) (struct device *, unsigned int);
56075 };
56076 +typedef struct abx500_ops __no_const abx500_ops_no_const;
56077
56078 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
56079 void abx500_remove_ops(struct device *dev);
56080 diff -urNp linux-3.0.4/include/linux/mm.h linux-3.0.4/include/linux/mm.h
56081 --- linux-3.0.4/include/linux/mm.h 2011-08-23 21:44:40.000000000 -0400
56082 +++ linux-3.0.4/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
56083 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
56084
56085 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56086 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56087 +
56088 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56089 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56090 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56091 +#else
56092 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56093 +#endif
56094 +
56095 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56096 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56097
56098 @@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
56099 int set_page_dirty_lock(struct page *page);
56100 int clear_page_dirty_for_io(struct page *page);
56101
56102 -/* Is the vma a continuation of the stack vma above it? */
56103 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
56104 -{
56105 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56106 -}
56107 -
56108 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
56109 - unsigned long addr)
56110 -{
56111 - return (vma->vm_flags & VM_GROWSDOWN) &&
56112 - (vma->vm_start == addr) &&
56113 - !vma_growsdown(vma->vm_prev, addr);
56114 -}
56115 -
56116 -/* Is the vma a continuation of the stack vma below it? */
56117 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
56118 -{
56119 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
56120 -}
56121 -
56122 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
56123 - unsigned long addr)
56124 -{
56125 - return (vma->vm_flags & VM_GROWSUP) &&
56126 - (vma->vm_end == addr) &&
56127 - !vma_growsup(vma->vm_next, addr);
56128 -}
56129 -
56130 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56131 unsigned long old_addr, struct vm_area_struct *new_vma,
56132 unsigned long new_addr, unsigned long len);
56133 @@ -1169,6 +1148,15 @@ struct shrinker {
56134 extern void register_shrinker(struct shrinker *);
56135 extern void unregister_shrinker(struct shrinker *);
56136
56137 +#ifdef CONFIG_MMU
56138 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
56139 +#else
56140 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
56141 +{
56142 + return __pgprot(0);
56143 +}
56144 +#endif
56145 +
56146 int vma_wants_writenotify(struct vm_area_struct *vma);
56147
56148 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
56149 @@ -1452,6 +1440,7 @@ out:
56150 }
56151
56152 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56153 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56154
56155 extern unsigned long do_brk(unsigned long, unsigned long);
56156
56157 @@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
56158 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56159 struct vm_area_struct **pprev);
56160
56161 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56162 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56163 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56164 +
56165 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56166 NULL if none. Assume start_addr < end_addr. */
56167 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56168 @@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
56169 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56170 }
56171
56172 -#ifdef CONFIG_MMU
56173 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
56174 -#else
56175 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
56176 -{
56177 - return __pgprot(0);
56178 -}
56179 -#endif
56180 -
56181 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56182 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56183 unsigned long pfn, unsigned long size, pgprot_t);
56184 @@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
56185 extern int sysctl_memory_failure_early_kill;
56186 extern int sysctl_memory_failure_recovery;
56187 extern void shake_page(struct page *p, int access);
56188 -extern atomic_long_t mce_bad_pages;
56189 +extern atomic_long_unchecked_t mce_bad_pages;
56190 extern int soft_offline_page(struct page *page, int flags);
56191
56192 extern void dump_page(struct page *page);
56193 @@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
56194 unsigned int pages_per_huge_page);
56195 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
56196
56197 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56198 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56199 +#else
56200 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56201 +#endif
56202 +
56203 #endif /* __KERNEL__ */
56204 #endif /* _LINUX_MM_H */
56205 diff -urNp linux-3.0.4/include/linux/mm_types.h linux-3.0.4/include/linux/mm_types.h
56206 --- linux-3.0.4/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
56207 +++ linux-3.0.4/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
56208 @@ -184,6 +184,8 @@ struct vm_area_struct {
56209 #ifdef CONFIG_NUMA
56210 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
56211 #endif
56212 +
56213 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56214 };
56215
56216 struct core_thread {
56217 @@ -316,6 +318,24 @@ struct mm_struct {
56218 #ifdef CONFIG_CPUMASK_OFFSTACK
56219 struct cpumask cpumask_allocation;
56220 #endif
56221 +
56222 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56223 + unsigned long pax_flags;
56224 +#endif
56225 +
56226 +#ifdef CONFIG_PAX_DLRESOLVE
56227 + unsigned long call_dl_resolve;
56228 +#endif
56229 +
56230 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56231 + unsigned long call_syscall;
56232 +#endif
56233 +
56234 +#ifdef CONFIG_PAX_ASLR
56235 + unsigned long delta_mmap; /* randomized offset */
56236 + unsigned long delta_stack; /* randomized offset */
56237 +#endif
56238 +
56239 };
56240
56241 static inline void mm_init_cpumask(struct mm_struct *mm)
56242 diff -urNp linux-3.0.4/include/linux/mmu_notifier.h linux-3.0.4/include/linux/mmu_notifier.h
56243 --- linux-3.0.4/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
56244 +++ linux-3.0.4/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
56245 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
56246 */
56247 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
56248 ({ \
56249 - pte_t __pte; \
56250 + pte_t ___pte; \
56251 struct vm_area_struct *___vma = __vma; \
56252 unsigned long ___address = __address; \
56253 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
56254 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
56255 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
56256 - __pte; \
56257 + ___pte; \
56258 })
56259
56260 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
56261 diff -urNp linux-3.0.4/include/linux/mmzone.h linux-3.0.4/include/linux/mmzone.h
56262 --- linux-3.0.4/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
56263 +++ linux-3.0.4/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
56264 @@ -350,7 +350,7 @@ struct zone {
56265 unsigned long flags; /* zone flags, see below */
56266
56267 /* Zone statistics */
56268 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56269 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56270
56271 /*
56272 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
56273 diff -urNp linux-3.0.4/include/linux/mod_devicetable.h linux-3.0.4/include/linux/mod_devicetable.h
56274 --- linux-3.0.4/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
56275 +++ linux-3.0.4/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
56276 @@ -12,7 +12,7 @@
56277 typedef unsigned long kernel_ulong_t;
56278 #endif
56279
56280 -#define PCI_ANY_ID (~0)
56281 +#define PCI_ANY_ID ((__u16)~0)
56282
56283 struct pci_device_id {
56284 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
56285 @@ -131,7 +131,7 @@ struct usb_device_id {
56286 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
56287 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
56288
56289 -#define HID_ANY_ID (~0)
56290 +#define HID_ANY_ID (~0U)
56291
56292 struct hid_device_id {
56293 __u16 bus;
56294 diff -urNp linux-3.0.4/include/linux/module.h linux-3.0.4/include/linux/module.h
56295 --- linux-3.0.4/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
56296 +++ linux-3.0.4/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
56297 @@ -16,6 +16,7 @@
56298 #include <linux/kobject.h>
56299 #include <linux/moduleparam.h>
56300 #include <linux/tracepoint.h>
56301 +#include <linux/fs.h>
56302
56303 #include <linux/percpu.h>
56304 #include <asm/module.h>
56305 @@ -325,19 +326,16 @@ struct module
56306 int (*init)(void);
56307
56308 /* If this is non-NULL, vfree after init() returns */
56309 - void *module_init;
56310 + void *module_init_rx, *module_init_rw;
56311
56312 /* Here is the actual code + data, vfree'd on unload. */
56313 - void *module_core;
56314 + void *module_core_rx, *module_core_rw;
56315
56316 /* Here are the sizes of the init and core sections */
56317 - unsigned int init_size, core_size;
56318 + unsigned int init_size_rw, core_size_rw;
56319
56320 /* The size of the executable code in each section. */
56321 - unsigned int init_text_size, core_text_size;
56322 -
56323 - /* Size of RO sections of the module (text+rodata) */
56324 - unsigned int init_ro_size, core_ro_size;
56325 + unsigned int init_size_rx, core_size_rx;
56326
56327 /* Arch-specific module values */
56328 struct mod_arch_specific arch;
56329 @@ -393,6 +391,10 @@ struct module
56330 #ifdef CONFIG_EVENT_TRACING
56331 struct ftrace_event_call **trace_events;
56332 unsigned int num_trace_events;
56333 + struct file_operations trace_id;
56334 + struct file_operations trace_enable;
56335 + struct file_operations trace_format;
56336 + struct file_operations trace_filter;
56337 #endif
56338 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
56339 unsigned int num_ftrace_callsites;
56340 @@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
56341 bool is_module_percpu_address(unsigned long addr);
56342 bool is_module_text_address(unsigned long addr);
56343
56344 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56345 +{
56346 +
56347 +#ifdef CONFIG_PAX_KERNEXEC
56348 + if (ktla_ktva(addr) >= (unsigned long)start &&
56349 + ktla_ktva(addr) < (unsigned long)start + size)
56350 + return 1;
56351 +#endif
56352 +
56353 + return ((void *)addr >= start && (void *)addr < start + size);
56354 +}
56355 +
56356 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56357 +{
56358 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56359 +}
56360 +
56361 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56362 +{
56363 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
56364 +}
56365 +
56366 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
56367 +{
56368 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
56369 +}
56370 +
56371 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
56372 +{
56373 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
56374 +}
56375 +
56376 static inline int within_module_core(unsigned long addr, struct module *mod)
56377 {
56378 - return (unsigned long)mod->module_core <= addr &&
56379 - addr < (unsigned long)mod->module_core + mod->core_size;
56380 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
56381 }
56382
56383 static inline int within_module_init(unsigned long addr, struct module *mod)
56384 {
56385 - return (unsigned long)mod->module_init <= addr &&
56386 - addr < (unsigned long)mod->module_init + mod->init_size;
56387 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
56388 }
56389
56390 /* Search for module by name: must hold module_mutex. */
56391 diff -urNp linux-3.0.4/include/linux/moduleloader.h linux-3.0.4/include/linux/moduleloader.h
56392 --- linux-3.0.4/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
56393 +++ linux-3.0.4/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
56394 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
56395 sections. Returns NULL on failure. */
56396 void *module_alloc(unsigned long size);
56397
56398 +#ifdef CONFIG_PAX_KERNEXEC
56399 +void *module_alloc_exec(unsigned long size);
56400 +#else
56401 +#define module_alloc_exec(x) module_alloc(x)
56402 +#endif
56403 +
56404 /* Free memory returned from module_alloc. */
56405 void module_free(struct module *mod, void *module_region);
56406
56407 +#ifdef CONFIG_PAX_KERNEXEC
56408 +void module_free_exec(struct module *mod, void *module_region);
56409 +#else
56410 +#define module_free_exec(x, y) module_free((x), (y))
56411 +#endif
56412 +
56413 /* Apply the given relocation to the (simplified) ELF. Return -error
56414 or 0. */
56415 int apply_relocate(Elf_Shdr *sechdrs,
56416 diff -urNp linux-3.0.4/include/linux/moduleparam.h linux-3.0.4/include/linux/moduleparam.h
56417 --- linux-3.0.4/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
56418 +++ linux-3.0.4/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
56419 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
56420 * @len is usually just sizeof(string).
56421 */
56422 #define module_param_string(name, string, len, perm) \
56423 - static const struct kparam_string __param_string_##name \
56424 + static const struct kparam_string __param_string_##name __used \
56425 = { len, string }; \
56426 __module_param_call(MODULE_PARAM_PREFIX, name, \
56427 &param_ops_string, \
56428 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
56429 * module_param_named() for why this might be necessary.
56430 */
56431 #define module_param_array_named(name, array, type, nump, perm) \
56432 - static const struct kparam_array __param_arr_##name \
56433 + static const struct kparam_array __param_arr_##name __used \
56434 = { .max = ARRAY_SIZE(array), .num = nump, \
56435 .ops = &param_ops_##type, \
56436 .elemsize = sizeof(array[0]), .elem = array }; \
56437 diff -urNp linux-3.0.4/include/linux/namei.h linux-3.0.4/include/linux/namei.h
56438 --- linux-3.0.4/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
56439 +++ linux-3.0.4/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
56440 @@ -24,7 +24,7 @@ struct nameidata {
56441 unsigned seq;
56442 int last_type;
56443 unsigned depth;
56444 - char *saved_names[MAX_NESTED_LINKS + 1];
56445 + const char *saved_names[MAX_NESTED_LINKS + 1];
56446
56447 /* Intent data */
56448 union {
56449 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
56450 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
56451 extern void unlock_rename(struct dentry *, struct dentry *);
56452
56453 -static inline void nd_set_link(struct nameidata *nd, char *path)
56454 +static inline void nd_set_link(struct nameidata *nd, const char *path)
56455 {
56456 nd->saved_names[nd->depth] = path;
56457 }
56458
56459 -static inline char *nd_get_link(struct nameidata *nd)
56460 +static inline const char *nd_get_link(const struct nameidata *nd)
56461 {
56462 return nd->saved_names[nd->depth];
56463 }
56464 diff -urNp linux-3.0.4/include/linux/netdevice.h linux-3.0.4/include/linux/netdevice.h
56465 --- linux-3.0.4/include/linux/netdevice.h 2011-08-23 21:44:40.000000000 -0400
56466 +++ linux-3.0.4/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
56467 @@ -979,6 +979,7 @@ struct net_device_ops {
56468 int (*ndo_set_features)(struct net_device *dev,
56469 u32 features);
56470 };
56471 +typedef struct net_device_ops __no_const net_device_ops_no_const;
56472
56473 /*
56474 * The DEVICE structure.
56475 diff -urNp linux-3.0.4/include/linux/netfilter/xt_gradm.h linux-3.0.4/include/linux/netfilter/xt_gradm.h
56476 --- linux-3.0.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
56477 +++ linux-3.0.4/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
56478 @@ -0,0 +1,9 @@
56479 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
56480 +#define _LINUX_NETFILTER_XT_GRADM_H 1
56481 +
56482 +struct xt_gradm_mtinfo {
56483 + __u16 flags;
56484 + __u16 invflags;
56485 +};
56486 +
56487 +#endif
56488 diff -urNp linux-3.0.4/include/linux/oprofile.h linux-3.0.4/include/linux/oprofile.h
56489 --- linux-3.0.4/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
56490 +++ linux-3.0.4/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
56491 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
56492 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
56493 char const * name, ulong * val);
56494
56495 -/** Create a file for read-only access to an atomic_t. */
56496 +/** Create a file for read-only access to an atomic_unchecked_t. */
56497 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
56498 - char const * name, atomic_t * val);
56499 + char const * name, atomic_unchecked_t * val);
56500
56501 /** create a directory */
56502 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
56503 diff -urNp linux-3.0.4/include/linux/padata.h linux-3.0.4/include/linux/padata.h
56504 --- linux-3.0.4/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
56505 +++ linux-3.0.4/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
56506 @@ -129,7 +129,7 @@ struct parallel_data {
56507 struct padata_instance *pinst;
56508 struct padata_parallel_queue __percpu *pqueue;
56509 struct padata_serial_queue __percpu *squeue;
56510 - atomic_t seq_nr;
56511 + atomic_unchecked_t seq_nr;
56512 atomic_t reorder_objects;
56513 atomic_t refcnt;
56514 unsigned int max_seq_nr;
56515 diff -urNp linux-3.0.4/include/linux/perf_event.h linux-3.0.4/include/linux/perf_event.h
56516 --- linux-3.0.4/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
56517 +++ linux-3.0.4/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
56518 @@ -761,8 +761,8 @@ struct perf_event {
56519
56520 enum perf_event_active_state state;
56521 unsigned int attach_state;
56522 - local64_t count;
56523 - atomic64_t child_count;
56524 + local64_t count; /* PaX: fix it one day */
56525 + atomic64_unchecked_t child_count;
56526
56527 /*
56528 * These are the total time in nanoseconds that the event
56529 @@ -813,8 +813,8 @@ struct perf_event {
56530 * These accumulate total time (in nanoseconds) that children
56531 * events have been enabled and running, respectively.
56532 */
56533 - atomic64_t child_total_time_enabled;
56534 - atomic64_t child_total_time_running;
56535 + atomic64_unchecked_t child_total_time_enabled;
56536 + atomic64_unchecked_t child_total_time_running;
56537
56538 /*
56539 * Protect attach/detach and child_list:
56540 diff -urNp linux-3.0.4/include/linux/pipe_fs_i.h linux-3.0.4/include/linux/pipe_fs_i.h
56541 --- linux-3.0.4/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
56542 +++ linux-3.0.4/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
56543 @@ -46,9 +46,9 @@ struct pipe_buffer {
56544 struct pipe_inode_info {
56545 wait_queue_head_t wait;
56546 unsigned int nrbufs, curbuf, buffers;
56547 - unsigned int readers;
56548 - unsigned int writers;
56549 - unsigned int waiting_writers;
56550 + atomic_t readers;
56551 + atomic_t writers;
56552 + atomic_t waiting_writers;
56553 unsigned int r_counter;
56554 unsigned int w_counter;
56555 struct page *tmp_page;
56556 diff -urNp linux-3.0.4/include/linux/pm_runtime.h linux-3.0.4/include/linux/pm_runtime.h
56557 --- linux-3.0.4/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
56558 +++ linux-3.0.4/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
56559 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
56560
56561 static inline void pm_runtime_mark_last_busy(struct device *dev)
56562 {
56563 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
56564 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
56565 }
56566
56567 #else /* !CONFIG_PM_RUNTIME */
56568 diff -urNp linux-3.0.4/include/linux/poison.h linux-3.0.4/include/linux/poison.h
56569 --- linux-3.0.4/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
56570 +++ linux-3.0.4/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
56571 @@ -19,8 +19,8 @@
56572 * under normal circumstances, used to verify that nobody uses
56573 * non-initialized list entries.
56574 */
56575 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
56576 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
56577 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
56578 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
56579
56580 /********** include/linux/timer.h **********/
56581 /*
56582 diff -urNp linux-3.0.4/include/linux/preempt.h linux-3.0.4/include/linux/preempt.h
56583 --- linux-3.0.4/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
56584 +++ linux-3.0.4/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
56585 @@ -115,7 +115,7 @@ struct preempt_ops {
56586 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
56587 void (*sched_out)(struct preempt_notifier *notifier,
56588 struct task_struct *next);
56589 -};
56590 +} __no_const;
56591
56592 /**
56593 * preempt_notifier - key for installing preemption notifiers
56594 diff -urNp linux-3.0.4/include/linux/proc_fs.h linux-3.0.4/include/linux/proc_fs.h
56595 --- linux-3.0.4/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
56596 +++ linux-3.0.4/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
56597 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
56598 return proc_create_data(name, mode, parent, proc_fops, NULL);
56599 }
56600
56601 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
56602 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
56603 +{
56604 +#ifdef CONFIG_GRKERNSEC_PROC_USER
56605 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
56606 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56607 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
56608 +#else
56609 + return proc_create_data(name, mode, parent, proc_fops, NULL);
56610 +#endif
56611 +}
56612 +
56613 +
56614 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
56615 mode_t mode, struct proc_dir_entry *base,
56616 read_proc_t *read_proc, void * data)
56617 @@ -258,7 +271,7 @@ union proc_op {
56618 int (*proc_show)(struct seq_file *m,
56619 struct pid_namespace *ns, struct pid *pid,
56620 struct task_struct *task);
56621 -};
56622 +} __no_const;
56623
56624 struct ctl_table_header;
56625 struct ctl_table;
56626 diff -urNp linux-3.0.4/include/linux/ptrace.h linux-3.0.4/include/linux/ptrace.h
56627 --- linux-3.0.4/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
56628 +++ linux-3.0.4/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
56629 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
56630 extern void exit_ptrace(struct task_struct *tracer);
56631 #define PTRACE_MODE_READ 1
56632 #define PTRACE_MODE_ATTACH 2
56633 -/* Returns 0 on success, -errno on denial. */
56634 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
56635 /* Returns true on success, false on denial. */
56636 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
56637 +/* Returns true on success, false on denial. */
56638 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
56639
56640 static inline int ptrace_reparented(struct task_struct *child)
56641 {
56642 diff -urNp linux-3.0.4/include/linux/random.h linux-3.0.4/include/linux/random.h
56643 --- linux-3.0.4/include/linux/random.h 2011-08-23 21:44:40.000000000 -0400
56644 +++ linux-3.0.4/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
56645 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
56646
56647 u32 prandom32(struct rnd_state *);
56648
56649 +static inline unsigned long pax_get_random_long(void)
56650 +{
56651 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
56652 +}
56653 +
56654 /*
56655 * Handle minimum values for seeds
56656 */
56657 static inline u32 __seed(u32 x, u32 m)
56658 {
56659 - return (x < m) ? x + m : x;
56660 + return (x <= m) ? x + m + 1 : x;
56661 }
56662
56663 /**
56664 diff -urNp linux-3.0.4/include/linux/reboot.h linux-3.0.4/include/linux/reboot.h
56665 --- linux-3.0.4/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
56666 +++ linux-3.0.4/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
56667 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
56668 * Architecture-specific implementations of sys_reboot commands.
56669 */
56670
56671 -extern void machine_restart(char *cmd);
56672 -extern void machine_halt(void);
56673 -extern void machine_power_off(void);
56674 +extern void machine_restart(char *cmd) __noreturn;
56675 +extern void machine_halt(void) __noreturn;
56676 +extern void machine_power_off(void) __noreturn;
56677
56678 extern void machine_shutdown(void);
56679 struct pt_regs;
56680 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
56681 */
56682
56683 extern void kernel_restart_prepare(char *cmd);
56684 -extern void kernel_restart(char *cmd);
56685 -extern void kernel_halt(void);
56686 -extern void kernel_power_off(void);
56687 +extern void kernel_restart(char *cmd) __noreturn;
56688 +extern void kernel_halt(void) __noreturn;
56689 +extern void kernel_power_off(void) __noreturn;
56690
56691 extern int C_A_D; /* for sysctl */
56692 void ctrl_alt_del(void);
56693 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
56694 * Emergency restart, callable from an interrupt handler.
56695 */
56696
56697 -extern void emergency_restart(void);
56698 +extern void emergency_restart(void) __noreturn;
56699 #include <asm/emergency-restart.h>
56700
56701 #endif
56702 diff -urNp linux-3.0.4/include/linux/reiserfs_fs.h linux-3.0.4/include/linux/reiserfs_fs.h
56703 --- linux-3.0.4/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
56704 +++ linux-3.0.4/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
56705 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
56706 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56707
56708 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56709 -#define get_generation(s) atomic_read (&fs_generation(s))
56710 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56711 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56712 #define __fs_changed(gen,s) (gen != get_generation (s))
56713 #define fs_changed(gen,s) \
56714 diff -urNp linux-3.0.4/include/linux/reiserfs_fs_sb.h linux-3.0.4/include/linux/reiserfs_fs_sb.h
56715 --- linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
56716 +++ linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
56717 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
56718 /* Comment? -Hans */
56719 wait_queue_head_t s_wait;
56720 /* To be obsoleted soon by per buffer seals.. -Hans */
56721 - atomic_t s_generation_counter; // increased by one every time the
56722 + atomic_unchecked_t s_generation_counter; // increased by one every time the
56723 // tree gets re-balanced
56724 unsigned long s_properties; /* File system properties. Currently holds
56725 on-disk FS format */
56726 diff -urNp linux-3.0.4/include/linux/relay.h linux-3.0.4/include/linux/relay.h
56727 --- linux-3.0.4/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
56728 +++ linux-3.0.4/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
56729 @@ -159,7 +159,7 @@ struct rchan_callbacks
56730 * The callback should return 0 if successful, negative if not.
56731 */
56732 int (*remove_buf_file)(struct dentry *dentry);
56733 -};
56734 +} __no_const;
56735
56736 /*
56737 * CONFIG_RELAY kernel API, kernel/relay.c
56738 diff -urNp linux-3.0.4/include/linux/rfkill.h linux-3.0.4/include/linux/rfkill.h
56739 --- linux-3.0.4/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
56740 +++ linux-3.0.4/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
56741 @@ -147,6 +147,7 @@ struct rfkill_ops {
56742 void (*query)(struct rfkill *rfkill, void *data);
56743 int (*set_block)(void *data, bool blocked);
56744 };
56745 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
56746
56747 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
56748 /**
56749 diff -urNp linux-3.0.4/include/linux/rmap.h linux-3.0.4/include/linux/rmap.h
56750 --- linux-3.0.4/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
56751 +++ linux-3.0.4/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
56752 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
56753 void anon_vma_init(void); /* create anon_vma_cachep */
56754 int anon_vma_prepare(struct vm_area_struct *);
56755 void unlink_anon_vmas(struct vm_area_struct *);
56756 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
56757 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
56758 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
56759 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
56760 void __anon_vma_link(struct vm_area_struct *);
56761
56762 static inline void anon_vma_merge(struct vm_area_struct *vma,
56763 diff -urNp linux-3.0.4/include/linux/sched.h linux-3.0.4/include/linux/sched.h
56764 --- linux-3.0.4/include/linux/sched.h 2011-07-21 22:17:23.000000000 -0400
56765 +++ linux-3.0.4/include/linux/sched.h 2011-08-25 17:22:27.000000000 -0400
56766 @@ -100,6 +100,7 @@ struct bio_list;
56767 struct fs_struct;
56768 struct perf_event_context;
56769 struct blk_plug;
56770 +struct linux_binprm;
56771
56772 /*
56773 * List of flags we want to share for kernel threads,
56774 @@ -380,10 +381,13 @@ struct user_namespace;
56775 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
56776
56777 extern int sysctl_max_map_count;
56778 +extern unsigned long sysctl_heap_stack_gap;
56779
56780 #include <linux/aio.h>
56781
56782 #ifdef CONFIG_MMU
56783 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
56784 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
56785 extern void arch_pick_mmap_layout(struct mm_struct *mm);
56786 extern unsigned long
56787 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
56788 @@ -629,6 +633,17 @@ struct signal_struct {
56789 #ifdef CONFIG_TASKSTATS
56790 struct taskstats *stats;
56791 #endif
56792 +
56793 +#ifdef CONFIG_GRKERNSEC
56794 + u32 curr_ip;
56795 + u32 saved_ip;
56796 + u32 gr_saddr;
56797 + u32 gr_daddr;
56798 + u16 gr_sport;
56799 + u16 gr_dport;
56800 + u8 used_accept:1;
56801 +#endif
56802 +
56803 #ifdef CONFIG_AUDIT
56804 unsigned audit_tty;
56805 struct tty_audit_buf *tty_audit_buf;
56806 @@ -710,6 +725,11 @@ struct user_struct {
56807 struct key *session_keyring; /* UID's default session keyring */
56808 #endif
56809
56810 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56811 + unsigned int banned;
56812 + unsigned long ban_expires;
56813 +#endif
56814 +
56815 /* Hash table maintenance information */
56816 struct hlist_node uidhash_node;
56817 uid_t uid;
56818 @@ -1340,8 +1360,8 @@ struct task_struct {
56819 struct list_head thread_group;
56820
56821 struct completion *vfork_done; /* for vfork() */
56822 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
56823 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56824 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
56825 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56826
56827 cputime_t utime, stime, utimescaled, stimescaled;
56828 cputime_t gtime;
56829 @@ -1357,13 +1377,6 @@ struct task_struct {
56830 struct task_cputime cputime_expires;
56831 struct list_head cpu_timers[3];
56832
56833 -/* process credentials */
56834 - const struct cred __rcu *real_cred; /* objective and real subjective task
56835 - * credentials (COW) */
56836 - const struct cred __rcu *cred; /* effective (overridable) subjective task
56837 - * credentials (COW) */
56838 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56839 -
56840 char comm[TASK_COMM_LEN]; /* executable name excluding path
56841 - access with [gs]et_task_comm (which lock
56842 it with task_lock())
56843 @@ -1380,8 +1393,16 @@ struct task_struct {
56844 #endif
56845 /* CPU-specific state of this task */
56846 struct thread_struct thread;
56847 +/* thread_info moved to task_struct */
56848 +#ifdef CONFIG_X86
56849 + struct thread_info tinfo;
56850 +#endif
56851 /* filesystem information */
56852 struct fs_struct *fs;
56853 +
56854 + const struct cred __rcu *cred; /* effective (overridable) subjective task
56855 + * credentials (COW) */
56856 +
56857 /* open file information */
56858 struct files_struct *files;
56859 /* namespaces */
56860 @@ -1428,6 +1449,11 @@ struct task_struct {
56861 struct rt_mutex_waiter *pi_blocked_on;
56862 #endif
56863
56864 +/* process credentials */
56865 + const struct cred __rcu *real_cred; /* objective and real subjective task
56866 + * credentials (COW) */
56867 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56868 +
56869 #ifdef CONFIG_DEBUG_MUTEXES
56870 /* mutex deadlock detection */
56871 struct mutex_waiter *blocked_on;
56872 @@ -1538,6 +1564,21 @@ struct task_struct {
56873 unsigned long default_timer_slack_ns;
56874
56875 struct list_head *scm_work_list;
56876 +
56877 +#ifdef CONFIG_GRKERNSEC
56878 + /* grsecurity */
56879 + struct dentry *gr_chroot_dentry;
56880 + struct acl_subject_label *acl;
56881 + struct acl_role_label *role;
56882 + struct file *exec_file;
56883 + u16 acl_role_id;
56884 + /* is this the task that authenticated to the special role */
56885 + u8 acl_sp_role;
56886 + u8 is_writable;
56887 + u8 brute;
56888 + u8 gr_is_chrooted;
56889 +#endif
56890 +
56891 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
56892 /* Index of current stored address in ret_stack */
56893 int curr_ret_stack;
56894 @@ -1572,6 +1613,57 @@ struct task_struct {
56895 #endif
56896 };
56897
56898 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
56899 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
56900 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
56901 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
56902 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
56903 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
56904 +
56905 +#ifdef CONFIG_PAX_SOFTMODE
56906 +extern int pax_softmode;
56907 +#endif
56908 +
56909 +extern int pax_check_flags(unsigned long *);
56910 +
56911 +/* if tsk != current then task_lock must be held on it */
56912 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56913 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
56914 +{
56915 + if (likely(tsk->mm))
56916 + return tsk->mm->pax_flags;
56917 + else
56918 + return 0UL;
56919 +}
56920 +
56921 +/* if tsk != current then task_lock must be held on it */
56922 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
56923 +{
56924 + if (likely(tsk->mm)) {
56925 + tsk->mm->pax_flags = flags;
56926 + return 0;
56927 + }
56928 + return -EINVAL;
56929 +}
56930 +#endif
56931 +
56932 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56933 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
56934 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
56935 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
56936 +#endif
56937 +
56938 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
56939 +extern void pax_report_insns(void *pc, void *sp);
56940 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
56941 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
56942 +
56943 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
56944 +extern void pax_track_stack(void);
56945 +#else
56946 +static inline void pax_track_stack(void) {}
56947 +#endif
56948 +
56949 /* Future-safe accessor for struct task_struct's cpus_allowed. */
56950 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
56951
56952 @@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
56953 #define PF_DUMPCORE 0x00000200 /* dumped core */
56954 #define PF_SIGNALED 0x00000400 /* killed by a signal */
56955 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
56956 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
56957 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
56958 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
56959 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
56960 @@ -2056,7 +2149,9 @@ void yield(void);
56961 extern struct exec_domain default_exec_domain;
56962
56963 union thread_union {
56964 +#ifndef CONFIG_X86
56965 struct thread_info thread_info;
56966 +#endif
56967 unsigned long stack[THREAD_SIZE/sizeof(long)];
56968 };
56969
56970 @@ -2089,6 +2184,7 @@ extern struct pid_namespace init_pid_ns;
56971 */
56972
56973 extern struct task_struct *find_task_by_vpid(pid_t nr);
56974 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
56975 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
56976 struct pid_namespace *ns);
56977
56978 @@ -2225,7 +2321,7 @@ extern void __cleanup_sighand(struct sig
56979 extern void exit_itimers(struct signal_struct *);
56980 extern void flush_itimer_signals(void);
56981
56982 -extern NORET_TYPE void do_group_exit(int);
56983 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
56984
56985 extern void daemonize(const char *, ...);
56986 extern int allow_signal(int);
56987 @@ -2393,13 +2489,17 @@ static inline unsigned long *end_of_stac
56988
56989 #endif
56990
56991 -static inline int object_is_on_stack(void *obj)
56992 +static inline int object_starts_on_stack(void *obj)
56993 {
56994 - void *stack = task_stack_page(current);
56995 + const void *stack = task_stack_page(current);
56996
56997 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
56998 }
56999
57000 +#ifdef CONFIG_PAX_USERCOPY
57001 +extern int object_is_on_stack(const void *obj, unsigned long len);
57002 +#endif
57003 +
57004 extern void thread_info_cache_init(void);
57005
57006 #ifdef CONFIG_DEBUG_STACK_USAGE
57007 diff -urNp linux-3.0.4/include/linux/screen_info.h linux-3.0.4/include/linux/screen_info.h
57008 --- linux-3.0.4/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
57009 +++ linux-3.0.4/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
57010 @@ -43,7 +43,8 @@ struct screen_info {
57011 __u16 pages; /* 0x32 */
57012 __u16 vesa_attributes; /* 0x34 */
57013 __u32 capabilities; /* 0x36 */
57014 - __u8 _reserved[6]; /* 0x3a */
57015 + __u16 vesapm_size; /* 0x3a */
57016 + __u8 _reserved[4]; /* 0x3c */
57017 } __attribute__((packed));
57018
57019 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
57020 diff -urNp linux-3.0.4/include/linux/security.h linux-3.0.4/include/linux/security.h
57021 --- linux-3.0.4/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
57022 +++ linux-3.0.4/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
57023 @@ -36,6 +36,7 @@
57024 #include <linux/key.h>
57025 #include <linux/xfrm.h>
57026 #include <linux/slab.h>
57027 +#include <linux/grsecurity.h>
57028 #include <net/flow.h>
57029
57030 /* Maximum number of letters for an LSM name string */
57031 diff -urNp linux-3.0.4/include/linux/seq_file.h linux-3.0.4/include/linux/seq_file.h
57032 --- linux-3.0.4/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
57033 +++ linux-3.0.4/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
57034 @@ -32,6 +32,7 @@ struct seq_operations {
57035 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
57036 int (*show) (struct seq_file *m, void *v);
57037 };
57038 +typedef struct seq_operations __no_const seq_operations_no_const;
57039
57040 #define SEQ_SKIP 1
57041
57042 diff -urNp linux-3.0.4/include/linux/shmem_fs.h linux-3.0.4/include/linux/shmem_fs.h
57043 --- linux-3.0.4/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
57044 +++ linux-3.0.4/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
57045 @@ -10,7 +10,7 @@
57046
57047 #define SHMEM_NR_DIRECT 16
57048
57049 -#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
57050 +#define SHMEM_SYMLINK_INLINE_LEN 64
57051
57052 struct shmem_inode_info {
57053 spinlock_t lock;
57054 diff -urNp linux-3.0.4/include/linux/shm.h linux-3.0.4/include/linux/shm.h
57055 --- linux-3.0.4/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
57056 +++ linux-3.0.4/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
57057 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57058 pid_t shm_cprid;
57059 pid_t shm_lprid;
57060 struct user_struct *mlock_user;
57061 +#ifdef CONFIG_GRKERNSEC
57062 + time_t shm_createtime;
57063 + pid_t shm_lapid;
57064 +#endif
57065 };
57066
57067 /* shm_mode upper byte flags */
57068 diff -urNp linux-3.0.4/include/linux/skbuff.h linux-3.0.4/include/linux/skbuff.h
57069 --- linux-3.0.4/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
57070 +++ linux-3.0.4/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
57071 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
57072 */
57073 static inline int skb_queue_empty(const struct sk_buff_head *list)
57074 {
57075 - return list->next == (struct sk_buff *)list;
57076 + return list->next == (const struct sk_buff *)list;
57077 }
57078
57079 /**
57080 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
57081 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57082 const struct sk_buff *skb)
57083 {
57084 - return skb->next == (struct sk_buff *)list;
57085 + return skb->next == (const struct sk_buff *)list;
57086 }
57087
57088 /**
57089 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
57090 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57091 const struct sk_buff *skb)
57092 {
57093 - return skb->prev == (struct sk_buff *)list;
57094 + return skb->prev == (const struct sk_buff *)list;
57095 }
57096
57097 /**
57098 @@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
57099 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
57100 */
57101 #ifndef NET_SKB_PAD
57102 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
57103 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
57104 #endif
57105
57106 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57107 diff -urNp linux-3.0.4/include/linux/slab_def.h linux-3.0.4/include/linux/slab_def.h
57108 --- linux-3.0.4/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
57109 +++ linux-3.0.4/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
57110 @@ -96,10 +96,10 @@ struct kmem_cache {
57111 unsigned long node_allocs;
57112 unsigned long node_frees;
57113 unsigned long node_overflow;
57114 - atomic_t allochit;
57115 - atomic_t allocmiss;
57116 - atomic_t freehit;
57117 - atomic_t freemiss;
57118 + atomic_unchecked_t allochit;
57119 + atomic_unchecked_t allocmiss;
57120 + atomic_unchecked_t freehit;
57121 + atomic_unchecked_t freemiss;
57122
57123 /*
57124 * If debugging is enabled, then the allocator can add additional
57125 diff -urNp linux-3.0.4/include/linux/slab.h linux-3.0.4/include/linux/slab.h
57126 --- linux-3.0.4/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
57127 +++ linux-3.0.4/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
57128 @@ -11,12 +11,20 @@
57129
57130 #include <linux/gfp.h>
57131 #include <linux/types.h>
57132 +#include <linux/err.h>
57133
57134 /*
57135 * Flags to pass to kmem_cache_create().
57136 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57137 */
57138 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57139 +
57140 +#ifdef CONFIG_PAX_USERCOPY
57141 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57142 +#else
57143 +#define SLAB_USERCOPY 0x00000000UL
57144 +#endif
57145 +
57146 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57147 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57148 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57149 @@ -87,10 +95,13 @@
57150 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57151 * Both make kfree a no-op.
57152 */
57153 -#define ZERO_SIZE_PTR ((void *)16)
57154 +#define ZERO_SIZE_PTR \
57155 +({ \
57156 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57157 + (void *)(-MAX_ERRNO-1L); \
57158 +})
57159
57160 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57161 - (unsigned long)ZERO_SIZE_PTR)
57162 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57163
57164 /*
57165 * struct kmem_cache related prototypes
57166 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void
57167 void kfree(const void *);
57168 void kzfree(const void *);
57169 size_t ksize(const void *);
57170 +void check_object_size(const void *ptr, unsigned long n, bool to);
57171
57172 /*
57173 * Allocator specific definitions. These are mainly used to establish optimized
57174 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
57175
57176 void __init kmem_cache_init_late(void);
57177
57178 +#define kmalloc(x, y) \
57179 +({ \
57180 + void *___retval; \
57181 + intoverflow_t ___x = (intoverflow_t)x; \
57182 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
57183 + ___retval = NULL; \
57184 + else \
57185 + ___retval = kmalloc((size_t)___x, (y)); \
57186 + ___retval; \
57187 +})
57188 +
57189 +#define kmalloc_node(x, y, z) \
57190 +({ \
57191 + void *___retval; \
57192 + intoverflow_t ___x = (intoverflow_t)x; \
57193 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57194 + ___retval = NULL; \
57195 + else \
57196 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
57197 + ___retval; \
57198 +})
57199 +
57200 +#define kzalloc(x, y) \
57201 +({ \
57202 + void *___retval; \
57203 + intoverflow_t ___x = (intoverflow_t)x; \
57204 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
57205 + ___retval = NULL; \
57206 + else \
57207 + ___retval = kzalloc((size_t)___x, (y)); \
57208 + ___retval; \
57209 +})
57210 +
57211 +#define __krealloc(x, y, z) \
57212 +({ \
57213 + void *___retval; \
57214 + intoverflow_t ___y = (intoverflow_t)y; \
57215 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
57216 + ___retval = NULL; \
57217 + else \
57218 + ___retval = __krealloc((x), (size_t)___y, (z)); \
57219 + ___retval; \
57220 +})
57221 +
57222 +#define krealloc(x, y, z) \
57223 +({ \
57224 + void *___retval; \
57225 + intoverflow_t ___y = (intoverflow_t)y; \
57226 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
57227 + ___retval = NULL; \
57228 + else \
57229 + ___retval = krealloc((x), (size_t)___y, (z)); \
57230 + ___retval; \
57231 +})
57232 +
57233 #endif /* _LINUX_SLAB_H */
57234 diff -urNp linux-3.0.4/include/linux/slub_def.h linux-3.0.4/include/linux/slub_def.h
57235 --- linux-3.0.4/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
57236 +++ linux-3.0.4/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
57237 @@ -82,7 +82,7 @@ struct kmem_cache {
57238 struct kmem_cache_order_objects max;
57239 struct kmem_cache_order_objects min;
57240 gfp_t allocflags; /* gfp flags to use on each alloc */
57241 - int refcount; /* Refcount for slab cache destroy */
57242 + atomic_t refcount; /* Refcount for slab cache destroy */
57243 void (*ctor)(void *);
57244 int inuse; /* Offset to metadata */
57245 int align; /* Alignment */
57246 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
57247 }
57248
57249 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
57250 -void *__kmalloc(size_t size, gfp_t flags);
57251 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
57252
57253 static __always_inline void *
57254 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
57255 diff -urNp linux-3.0.4/include/linux/sonet.h linux-3.0.4/include/linux/sonet.h
57256 --- linux-3.0.4/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
57257 +++ linux-3.0.4/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
57258 @@ -61,7 +61,7 @@ struct sonet_stats {
57259 #include <asm/atomic.h>
57260
57261 struct k_sonet_stats {
57262 -#define __HANDLE_ITEM(i) atomic_t i
57263 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57264 __SONET_ITEMS
57265 #undef __HANDLE_ITEM
57266 };
57267 diff -urNp linux-3.0.4/include/linux/sunrpc/clnt.h linux-3.0.4/include/linux/sunrpc/clnt.h
57268 --- linux-3.0.4/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
57269 +++ linux-3.0.4/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
57270 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
57271 {
57272 switch (sap->sa_family) {
57273 case AF_INET:
57274 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
57275 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57276 case AF_INET6:
57277 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57278 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57279 }
57280 return 0;
57281 }
57282 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
57283 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57284 const struct sockaddr *src)
57285 {
57286 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57287 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57288 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57289
57290 dsin->sin_family = ssin->sin_family;
57291 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
57292 if (sa->sa_family != AF_INET6)
57293 return 0;
57294
57295 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57296 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57297 }
57298
57299 #endif /* __KERNEL__ */
57300 diff -urNp linux-3.0.4/include/linux/sunrpc/svc_rdma.h linux-3.0.4/include/linux/sunrpc/svc_rdma.h
57301 --- linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
57302 +++ linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
57303 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57304 extern unsigned int svcrdma_max_requests;
57305 extern unsigned int svcrdma_max_req_size;
57306
57307 -extern atomic_t rdma_stat_recv;
57308 -extern atomic_t rdma_stat_read;
57309 -extern atomic_t rdma_stat_write;
57310 -extern atomic_t rdma_stat_sq_starve;
57311 -extern atomic_t rdma_stat_rq_starve;
57312 -extern atomic_t rdma_stat_rq_poll;
57313 -extern atomic_t rdma_stat_rq_prod;
57314 -extern atomic_t rdma_stat_sq_poll;
57315 -extern atomic_t rdma_stat_sq_prod;
57316 +extern atomic_unchecked_t rdma_stat_recv;
57317 +extern atomic_unchecked_t rdma_stat_read;
57318 +extern atomic_unchecked_t rdma_stat_write;
57319 +extern atomic_unchecked_t rdma_stat_sq_starve;
57320 +extern atomic_unchecked_t rdma_stat_rq_starve;
57321 +extern atomic_unchecked_t rdma_stat_rq_poll;
57322 +extern atomic_unchecked_t rdma_stat_rq_prod;
57323 +extern atomic_unchecked_t rdma_stat_sq_poll;
57324 +extern atomic_unchecked_t rdma_stat_sq_prod;
57325
57326 #define RPCRDMA_VERSION 1
57327
57328 diff -urNp linux-3.0.4/include/linux/sysctl.h linux-3.0.4/include/linux/sysctl.h
57329 --- linux-3.0.4/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
57330 +++ linux-3.0.4/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
57331 @@ -155,7 +155,11 @@ enum
57332 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57333 };
57334
57335 -
57336 +#ifdef CONFIG_PAX_SOFTMODE
57337 +enum {
57338 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
57339 +};
57340 +#endif
57341
57342 /* CTL_VM names: */
57343 enum
57344 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
57345
57346 extern int proc_dostring(struct ctl_table *, int,
57347 void __user *, size_t *, loff_t *);
57348 +extern int proc_dostring_modpriv(struct ctl_table *, int,
57349 + void __user *, size_t *, loff_t *);
57350 extern int proc_dointvec(struct ctl_table *, int,
57351 void __user *, size_t *, loff_t *);
57352 extern int proc_dointvec_minmax(struct ctl_table *, int,
57353 diff -urNp linux-3.0.4/include/linux/tty_ldisc.h linux-3.0.4/include/linux/tty_ldisc.h
57354 --- linux-3.0.4/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
57355 +++ linux-3.0.4/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
57356 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
57357
57358 struct module *owner;
57359
57360 - int refcount;
57361 + atomic_t refcount;
57362 };
57363
57364 struct tty_ldisc {
57365 diff -urNp linux-3.0.4/include/linux/types.h linux-3.0.4/include/linux/types.h
57366 --- linux-3.0.4/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
57367 +++ linux-3.0.4/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
57368 @@ -213,10 +213,26 @@ typedef struct {
57369 int counter;
57370 } atomic_t;
57371
57372 +#ifdef CONFIG_PAX_REFCOUNT
57373 +typedef struct {
57374 + int counter;
57375 +} atomic_unchecked_t;
57376 +#else
57377 +typedef atomic_t atomic_unchecked_t;
57378 +#endif
57379 +
57380 #ifdef CONFIG_64BIT
57381 typedef struct {
57382 long counter;
57383 } atomic64_t;
57384 +
57385 +#ifdef CONFIG_PAX_REFCOUNT
57386 +typedef struct {
57387 + long counter;
57388 +} atomic64_unchecked_t;
57389 +#else
57390 +typedef atomic64_t atomic64_unchecked_t;
57391 +#endif
57392 #endif
57393
57394 struct list_head {
57395 diff -urNp linux-3.0.4/include/linux/uaccess.h linux-3.0.4/include/linux/uaccess.h
57396 --- linux-3.0.4/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
57397 +++ linux-3.0.4/include/linux/uaccess.h 2011-08-23 21:47:56.000000000 -0400
57398 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
57399 long ret; \
57400 mm_segment_t old_fs = get_fs(); \
57401 \
57402 - set_fs(KERNEL_DS); \
57403 pagefault_disable(); \
57404 + set_fs(KERNEL_DS); \
57405 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
57406 - pagefault_enable(); \
57407 set_fs(old_fs); \
57408 + pagefault_enable(); \
57409 ret; \
57410 })
57411
57412 diff -urNp linux-3.0.4/include/linux/unaligned/access_ok.h linux-3.0.4/include/linux/unaligned/access_ok.h
57413 --- linux-3.0.4/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
57414 +++ linux-3.0.4/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
57415 @@ -6,32 +6,32 @@
57416
57417 static inline u16 get_unaligned_le16(const void *p)
57418 {
57419 - return le16_to_cpup((__le16 *)p);
57420 + return le16_to_cpup((const __le16 *)p);
57421 }
57422
57423 static inline u32 get_unaligned_le32(const void *p)
57424 {
57425 - return le32_to_cpup((__le32 *)p);
57426 + return le32_to_cpup((const __le32 *)p);
57427 }
57428
57429 static inline u64 get_unaligned_le64(const void *p)
57430 {
57431 - return le64_to_cpup((__le64 *)p);
57432 + return le64_to_cpup((const __le64 *)p);
57433 }
57434
57435 static inline u16 get_unaligned_be16(const void *p)
57436 {
57437 - return be16_to_cpup((__be16 *)p);
57438 + return be16_to_cpup((const __be16 *)p);
57439 }
57440
57441 static inline u32 get_unaligned_be32(const void *p)
57442 {
57443 - return be32_to_cpup((__be32 *)p);
57444 + return be32_to_cpup((const __be32 *)p);
57445 }
57446
57447 static inline u64 get_unaligned_be64(const void *p)
57448 {
57449 - return be64_to_cpup((__be64 *)p);
57450 + return be64_to_cpup((const __be64 *)p);
57451 }
57452
57453 static inline void put_unaligned_le16(u16 val, void *p)
57454 diff -urNp linux-3.0.4/include/linux/vmalloc.h linux-3.0.4/include/linux/vmalloc.h
57455 --- linux-3.0.4/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
57456 +++ linux-3.0.4/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
57457 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
57458 #define VM_MAP 0x00000004 /* vmap()ed pages */
57459 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
57460 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
57461 +
57462 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
57463 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
57464 +#endif
57465 +
57466 /* bits [20..32] reserved for arch specific ioremap internals */
57467
57468 /*
57469 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
57470 # endif
57471 #endif
57472
57473 +#define vmalloc(x) \
57474 +({ \
57475 + void *___retval; \
57476 + intoverflow_t ___x = (intoverflow_t)x; \
57477 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
57478 + ___retval = NULL; \
57479 + else \
57480 + ___retval = vmalloc((unsigned long)___x); \
57481 + ___retval; \
57482 +})
57483 +
57484 +#define vzalloc(x) \
57485 +({ \
57486 + void *___retval; \
57487 + intoverflow_t ___x = (intoverflow_t)x; \
57488 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
57489 + ___retval = NULL; \
57490 + else \
57491 + ___retval = vzalloc((unsigned long)___x); \
57492 + ___retval; \
57493 +})
57494 +
57495 +#define __vmalloc(x, y, z) \
57496 +({ \
57497 + void *___retval; \
57498 + intoverflow_t ___x = (intoverflow_t)x; \
57499 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
57500 + ___retval = NULL; \
57501 + else \
57502 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
57503 + ___retval; \
57504 +})
57505 +
57506 +#define vmalloc_user(x) \
57507 +({ \
57508 + void *___retval; \
57509 + intoverflow_t ___x = (intoverflow_t)x; \
57510 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
57511 + ___retval = NULL; \
57512 + else \
57513 + ___retval = vmalloc_user((unsigned long)___x); \
57514 + ___retval; \
57515 +})
57516 +
57517 +#define vmalloc_exec(x) \
57518 +({ \
57519 + void *___retval; \
57520 + intoverflow_t ___x = (intoverflow_t)x; \
57521 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
57522 + ___retval = NULL; \
57523 + else \
57524 + ___retval = vmalloc_exec((unsigned long)___x); \
57525 + ___retval; \
57526 +})
57527 +
57528 +#define vmalloc_node(x, y) \
57529 +({ \
57530 + void *___retval; \
57531 + intoverflow_t ___x = (intoverflow_t)x; \
57532 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
57533 + ___retval = NULL; \
57534 + else \
57535 + ___retval = vmalloc_node((unsigned long)___x, (y));\
57536 + ___retval; \
57537 +})
57538 +
57539 +#define vzalloc_node(x, y) \
57540 +({ \
57541 + void *___retval; \
57542 + intoverflow_t ___x = (intoverflow_t)x; \
57543 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
57544 + ___retval = NULL; \
57545 + else \
57546 + ___retval = vzalloc_node((unsigned long)___x, (y));\
57547 + ___retval; \
57548 +})
57549 +
57550 +#define vmalloc_32(x) \
57551 +({ \
57552 + void *___retval; \
57553 + intoverflow_t ___x = (intoverflow_t)x; \
57554 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
57555 + ___retval = NULL; \
57556 + else \
57557 + ___retval = vmalloc_32((unsigned long)___x); \
57558 + ___retval; \
57559 +})
57560 +
57561 +#define vmalloc_32_user(x) \
57562 +({ \
57563 +void *___retval; \
57564 + intoverflow_t ___x = (intoverflow_t)x; \
57565 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
57566 + ___retval = NULL; \
57567 + else \
57568 + ___retval = vmalloc_32_user((unsigned long)___x);\
57569 + ___retval; \
57570 +})
57571 +
57572 #endif /* _LINUX_VMALLOC_H */
57573 diff -urNp linux-3.0.4/include/linux/vmstat.h linux-3.0.4/include/linux/vmstat.h
57574 --- linux-3.0.4/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
57575 +++ linux-3.0.4/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
57576 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
57577 /*
57578 * Zone based page accounting with per cpu differentials.
57579 */
57580 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57581 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57582
57583 static inline void zone_page_state_add(long x, struct zone *zone,
57584 enum zone_stat_item item)
57585 {
57586 - atomic_long_add(x, &zone->vm_stat[item]);
57587 - atomic_long_add(x, &vm_stat[item]);
57588 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
57589 + atomic_long_add_unchecked(x, &vm_stat[item]);
57590 }
57591
57592 static inline unsigned long global_page_state(enum zone_stat_item item)
57593 {
57594 - long x = atomic_long_read(&vm_stat[item]);
57595 + long x = atomic_long_read_unchecked(&vm_stat[item]);
57596 #ifdef CONFIG_SMP
57597 if (x < 0)
57598 x = 0;
57599 @@ -109,7 +109,7 @@ static inline unsigned long global_page_
57600 static inline unsigned long zone_page_state(struct zone *zone,
57601 enum zone_stat_item item)
57602 {
57603 - long x = atomic_long_read(&zone->vm_stat[item]);
57604 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57605 #ifdef CONFIG_SMP
57606 if (x < 0)
57607 x = 0;
57608 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
57609 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
57610 enum zone_stat_item item)
57611 {
57612 - long x = atomic_long_read(&zone->vm_stat[item]);
57613 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57614
57615 #ifdef CONFIG_SMP
57616 int cpu;
57617 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
57618
57619 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
57620 {
57621 - atomic_long_inc(&zone->vm_stat[item]);
57622 - atomic_long_inc(&vm_stat[item]);
57623 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
57624 + atomic_long_inc_unchecked(&vm_stat[item]);
57625 }
57626
57627 static inline void __inc_zone_page_state(struct page *page,
57628 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
57629
57630 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
57631 {
57632 - atomic_long_dec(&zone->vm_stat[item]);
57633 - atomic_long_dec(&vm_stat[item]);
57634 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
57635 + atomic_long_dec_unchecked(&vm_stat[item]);
57636 }
57637
57638 static inline void __dec_zone_page_state(struct page *page,
57639 diff -urNp linux-3.0.4/include/media/saa7146_vv.h linux-3.0.4/include/media/saa7146_vv.h
57640 --- linux-3.0.4/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
57641 +++ linux-3.0.4/include/media/saa7146_vv.h 2011-08-24 18:26:09.000000000 -0400
57642 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
57643 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
57644
57645 /* the extension can override this */
57646 - struct v4l2_ioctl_ops ops;
57647 + v4l2_ioctl_ops_no_const ops;
57648 /* pointer to the saa7146 core ops */
57649 const struct v4l2_ioctl_ops *core_ops;
57650
57651 diff -urNp linux-3.0.4/include/media/v4l2-ioctl.h linux-3.0.4/include/media/v4l2-ioctl.h
57652 --- linux-3.0.4/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
57653 +++ linux-3.0.4/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
57654 @@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
57655 long (*vidioc_default) (struct file *file, void *fh,
57656 bool valid_prio, int cmd, void *arg);
57657 };
57658 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
57659
57660
57661 /* v4l debugging and diagnostics */
57662 diff -urNp linux-3.0.4/include/net/caif/cfctrl.h linux-3.0.4/include/net/caif/cfctrl.h
57663 --- linux-3.0.4/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
57664 +++ linux-3.0.4/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
57665 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
57666 void (*radioset_rsp)(void);
57667 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
57668 struct cflayer *client_layer);
57669 -};
57670 +} __no_const;
57671
57672 /* Link Setup Parameters for CAIF-Links. */
57673 struct cfctrl_link_param {
57674 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
57675 struct cfctrl {
57676 struct cfsrvl serv;
57677 struct cfctrl_rsp res;
57678 - atomic_t req_seq_no;
57679 - atomic_t rsp_seq_no;
57680 + atomic_unchecked_t req_seq_no;
57681 + atomic_unchecked_t rsp_seq_no;
57682 struct list_head list;
57683 /* Protects from simultaneous access to first_req list */
57684 spinlock_t info_list_lock;
57685 diff -urNp linux-3.0.4/include/net/flow.h linux-3.0.4/include/net/flow.h
57686 --- linux-3.0.4/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
57687 +++ linux-3.0.4/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
57688 @@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
57689 u8 dir, flow_resolve_t resolver, void *ctx);
57690
57691 extern void flow_cache_flush(void);
57692 -extern atomic_t flow_cache_genid;
57693 +extern atomic_unchecked_t flow_cache_genid;
57694
57695 #endif
57696 diff -urNp linux-3.0.4/include/net/inetpeer.h linux-3.0.4/include/net/inetpeer.h
57697 --- linux-3.0.4/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
57698 +++ linux-3.0.4/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
57699 @@ -43,8 +43,8 @@ struct inet_peer {
57700 */
57701 union {
57702 struct {
57703 - atomic_t rid; /* Frag reception counter */
57704 - atomic_t ip_id_count; /* IP ID for the next packet */
57705 + atomic_unchecked_t rid; /* Frag reception counter */
57706 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
57707 __u32 tcp_ts;
57708 __u32 tcp_ts_stamp;
57709 u32 metrics[RTAX_MAX];
57710 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
57711 {
57712 more++;
57713 inet_peer_refcheck(p);
57714 - return atomic_add_return(more, &p->ip_id_count) - more;
57715 + return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
57716 }
57717
57718 #endif /* _NET_INETPEER_H */
57719 diff -urNp linux-3.0.4/include/net/ip_fib.h linux-3.0.4/include/net/ip_fib.h
57720 --- linux-3.0.4/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
57721 +++ linux-3.0.4/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
57722 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
57723
57724 #define FIB_RES_SADDR(net, res) \
57725 ((FIB_RES_NH(res).nh_saddr_genid == \
57726 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
57727 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
57728 FIB_RES_NH(res).nh_saddr : \
57729 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
57730 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
57731 diff -urNp linux-3.0.4/include/net/ip_vs.h linux-3.0.4/include/net/ip_vs.h
57732 --- linux-3.0.4/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
57733 +++ linux-3.0.4/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
57734 @@ -509,7 +509,7 @@ struct ip_vs_conn {
57735 struct ip_vs_conn *control; /* Master control connection */
57736 atomic_t n_control; /* Number of controlled ones */
57737 struct ip_vs_dest *dest; /* real server */
57738 - atomic_t in_pkts; /* incoming packet counter */
57739 + atomic_unchecked_t in_pkts; /* incoming packet counter */
57740
57741 /* packet transmitter for different forwarding methods. If it
57742 mangles the packet, it must return NF_DROP or better NF_STOLEN,
57743 @@ -647,7 +647,7 @@ struct ip_vs_dest {
57744 __be16 port; /* port number of the server */
57745 union nf_inet_addr addr; /* IP address of the server */
57746 volatile unsigned flags; /* dest status flags */
57747 - atomic_t conn_flags; /* flags to copy to conn */
57748 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
57749 atomic_t weight; /* server weight */
57750
57751 atomic_t refcnt; /* reference counter */
57752 diff -urNp linux-3.0.4/include/net/irda/ircomm_core.h linux-3.0.4/include/net/irda/ircomm_core.h
57753 --- linux-3.0.4/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
57754 +++ linux-3.0.4/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
57755 @@ -51,7 +51,7 @@ typedef struct {
57756 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
57757 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
57758 struct ircomm_info *);
57759 -} call_t;
57760 +} __no_const call_t;
57761
57762 struct ircomm_cb {
57763 irda_queue_t queue;
57764 diff -urNp linux-3.0.4/include/net/irda/ircomm_tty.h linux-3.0.4/include/net/irda/ircomm_tty.h
57765 --- linux-3.0.4/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
57766 +++ linux-3.0.4/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
57767 @@ -35,6 +35,7 @@
57768 #include <linux/termios.h>
57769 #include <linux/timer.h>
57770 #include <linux/tty.h> /* struct tty_struct */
57771 +#include <asm/local.h>
57772
57773 #include <net/irda/irias_object.h>
57774 #include <net/irda/ircomm_core.h>
57775 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
57776 unsigned short close_delay;
57777 unsigned short closing_wait; /* time to wait before closing */
57778
57779 - int open_count;
57780 - int blocked_open; /* # of blocked opens */
57781 + local_t open_count;
57782 + local_t blocked_open; /* # of blocked opens */
57783
57784 /* Protect concurent access to :
57785 * o self->open_count
57786 diff -urNp linux-3.0.4/include/net/iucv/af_iucv.h linux-3.0.4/include/net/iucv/af_iucv.h
57787 --- linux-3.0.4/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
57788 +++ linux-3.0.4/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
57789 @@ -87,7 +87,7 @@ struct iucv_sock {
57790 struct iucv_sock_list {
57791 struct hlist_head head;
57792 rwlock_t lock;
57793 - atomic_t autobind_name;
57794 + atomic_unchecked_t autobind_name;
57795 };
57796
57797 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
57798 diff -urNp linux-3.0.4/include/net/lapb.h linux-3.0.4/include/net/lapb.h
57799 --- linux-3.0.4/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
57800 +++ linux-3.0.4/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
57801 @@ -95,7 +95,7 @@ struct lapb_cb {
57802 struct sk_buff_head write_queue;
57803 struct sk_buff_head ack_queue;
57804 unsigned char window;
57805 - struct lapb_register_struct callbacks;
57806 + struct lapb_register_struct *callbacks;
57807
57808 /* FRMR control information */
57809 struct lapb_frame frmr_data;
57810 diff -urNp linux-3.0.4/include/net/neighbour.h linux-3.0.4/include/net/neighbour.h
57811 --- linux-3.0.4/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
57812 +++ linux-3.0.4/include/net/neighbour.h 2011-08-26 19:49:56.000000000 -0400
57813 @@ -117,14 +117,14 @@ struct neighbour {
57814 };
57815
57816 struct neigh_ops {
57817 - int family;
57818 + const int family;
57819 void (*solicit)(struct neighbour *, struct sk_buff*);
57820 void (*error_report)(struct neighbour *, struct sk_buff*);
57821 int (*output)(struct sk_buff*);
57822 int (*connected_output)(struct sk_buff*);
57823 int (*hh_output)(struct sk_buff*);
57824 int (*queue_xmit)(struct sk_buff*);
57825 -};
57826 +} __do_const;
57827
57828 struct pneigh_entry {
57829 struct pneigh_entry *next;
57830 diff -urNp linux-3.0.4/include/net/netlink.h linux-3.0.4/include/net/netlink.h
57831 --- linux-3.0.4/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
57832 +++ linux-3.0.4/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
57833 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
57834 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
57835 {
57836 if (mark)
57837 - skb_trim(skb, (unsigned char *) mark - skb->data);
57838 + skb_trim(skb, (const unsigned char *) mark - skb->data);
57839 }
57840
57841 /**
57842 diff -urNp linux-3.0.4/include/net/netns/ipv4.h linux-3.0.4/include/net/netns/ipv4.h
57843 --- linux-3.0.4/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
57844 +++ linux-3.0.4/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
57845 @@ -56,8 +56,8 @@ struct netns_ipv4 {
57846
57847 unsigned int sysctl_ping_group_range[2];
57848
57849 - atomic_t rt_genid;
57850 - atomic_t dev_addr_genid;
57851 + atomic_unchecked_t rt_genid;
57852 + atomic_unchecked_t dev_addr_genid;
57853
57854 #ifdef CONFIG_IP_MROUTE
57855 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
57856 diff -urNp linux-3.0.4/include/net/sctp/sctp.h linux-3.0.4/include/net/sctp/sctp.h
57857 --- linux-3.0.4/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
57858 +++ linux-3.0.4/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
57859 @@ -315,9 +315,9 @@ do { \
57860
57861 #else /* SCTP_DEBUG */
57862
57863 -#define SCTP_DEBUG_PRINTK(whatever...)
57864 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
57865 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
57866 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
57867 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
57868 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
57869 #define SCTP_ENABLE_DEBUG
57870 #define SCTP_DISABLE_DEBUG
57871 #define SCTP_ASSERT(expr, str, func)
57872 diff -urNp linux-3.0.4/include/net/sock.h linux-3.0.4/include/net/sock.h
57873 --- linux-3.0.4/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
57874 +++ linux-3.0.4/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
57875 @@ -277,7 +277,7 @@ struct sock {
57876 #ifdef CONFIG_RPS
57877 __u32 sk_rxhash;
57878 #endif
57879 - atomic_t sk_drops;
57880 + atomic_unchecked_t sk_drops;
57881 int sk_rcvbuf;
57882
57883 struct sk_filter __rcu *sk_filter;
57884 @@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
57885 }
57886
57887 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
57888 - char __user *from, char *to,
57889 + char __user *from, unsigned char *to,
57890 int copy, int offset)
57891 {
57892 if (skb->ip_summed == CHECKSUM_NONE) {
57893 diff -urNp linux-3.0.4/include/net/tcp.h linux-3.0.4/include/net/tcp.h
57894 --- linux-3.0.4/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
57895 +++ linux-3.0.4/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
57896 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
57897 struct tcp_seq_afinfo {
57898 char *name;
57899 sa_family_t family;
57900 - struct file_operations seq_fops;
57901 - struct seq_operations seq_ops;
57902 + file_operations_no_const seq_fops;
57903 + seq_operations_no_const seq_ops;
57904 };
57905
57906 struct tcp_iter_state {
57907 diff -urNp linux-3.0.4/include/net/udp.h linux-3.0.4/include/net/udp.h
57908 --- linux-3.0.4/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
57909 +++ linux-3.0.4/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
57910 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
57911 char *name;
57912 sa_family_t family;
57913 struct udp_table *udp_table;
57914 - struct file_operations seq_fops;
57915 - struct seq_operations seq_ops;
57916 + file_operations_no_const seq_fops;
57917 + seq_operations_no_const seq_ops;
57918 };
57919
57920 struct udp_iter_state {
57921 diff -urNp linux-3.0.4/include/net/xfrm.h linux-3.0.4/include/net/xfrm.h
57922 --- linux-3.0.4/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
57923 +++ linux-3.0.4/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
57924 @@ -505,7 +505,7 @@ struct xfrm_policy {
57925 struct timer_list timer;
57926
57927 struct flow_cache_object flo;
57928 - atomic_t genid;
57929 + atomic_unchecked_t genid;
57930 u32 priority;
57931 u32 index;
57932 struct xfrm_mark mark;
57933 diff -urNp linux-3.0.4/include/rdma/iw_cm.h linux-3.0.4/include/rdma/iw_cm.h
57934 --- linux-3.0.4/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
57935 +++ linux-3.0.4/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
57936 @@ -120,7 +120,7 @@ struct iw_cm_verbs {
57937 int backlog);
57938
57939 int (*destroy_listen)(struct iw_cm_id *cm_id);
57940 -};
57941 +} __no_const;
57942
57943 /**
57944 * iw_create_cm_id - Create an IW CM identifier.
57945 diff -urNp linux-3.0.4/include/scsi/libfc.h linux-3.0.4/include/scsi/libfc.h
57946 --- linux-3.0.4/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
57947 +++ linux-3.0.4/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
57948 @@ -750,6 +750,7 @@ struct libfc_function_template {
57949 */
57950 void (*disc_stop_final) (struct fc_lport *);
57951 };
57952 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
57953
57954 /**
57955 * struct fc_disc - Discovery context
57956 @@ -853,7 +854,7 @@ struct fc_lport {
57957 struct fc_vport *vport;
57958
57959 /* Operational Information */
57960 - struct libfc_function_template tt;
57961 + libfc_function_template_no_const tt;
57962 u8 link_up;
57963 u8 qfull;
57964 enum fc_lport_state state;
57965 diff -urNp linux-3.0.4/include/scsi/scsi_device.h linux-3.0.4/include/scsi/scsi_device.h
57966 --- linux-3.0.4/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
57967 +++ linux-3.0.4/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
57968 @@ -161,9 +161,9 @@ struct scsi_device {
57969 unsigned int max_device_blocked; /* what device_blocked counts down from */
57970 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
57971
57972 - atomic_t iorequest_cnt;
57973 - atomic_t iodone_cnt;
57974 - atomic_t ioerr_cnt;
57975 + atomic_unchecked_t iorequest_cnt;
57976 + atomic_unchecked_t iodone_cnt;
57977 + atomic_unchecked_t ioerr_cnt;
57978
57979 struct device sdev_gendev,
57980 sdev_dev;
57981 diff -urNp linux-3.0.4/include/scsi/scsi_transport_fc.h linux-3.0.4/include/scsi/scsi_transport_fc.h
57982 --- linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
57983 +++ linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
57984 @@ -711,7 +711,7 @@ struct fc_function_template {
57985 unsigned long show_host_system_hostname:1;
57986
57987 unsigned long disable_target_scan:1;
57988 -};
57989 +} __do_const;
57990
57991
57992 /**
57993 diff -urNp linux-3.0.4/include/sound/ak4xxx-adda.h linux-3.0.4/include/sound/ak4xxx-adda.h
57994 --- linux-3.0.4/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
57995 +++ linux-3.0.4/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
57996 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
57997 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
57998 unsigned char val);
57999 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
58000 -};
58001 +} __no_const;
58002
58003 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
58004
58005 diff -urNp linux-3.0.4/include/sound/hwdep.h linux-3.0.4/include/sound/hwdep.h
58006 --- linux-3.0.4/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
58007 +++ linux-3.0.4/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
58008 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
58009 struct snd_hwdep_dsp_status *status);
58010 int (*dsp_load)(struct snd_hwdep *hw,
58011 struct snd_hwdep_dsp_image *image);
58012 -};
58013 +} __no_const;
58014
58015 struct snd_hwdep {
58016 struct snd_card *card;
58017 diff -urNp linux-3.0.4/include/sound/info.h linux-3.0.4/include/sound/info.h
58018 --- linux-3.0.4/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
58019 +++ linux-3.0.4/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
58020 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
58021 struct snd_info_buffer *buffer);
58022 void (*write)(struct snd_info_entry *entry,
58023 struct snd_info_buffer *buffer);
58024 -};
58025 +} __no_const;
58026
58027 struct snd_info_entry_ops {
58028 int (*open)(struct snd_info_entry *entry,
58029 diff -urNp linux-3.0.4/include/sound/pcm.h linux-3.0.4/include/sound/pcm.h
58030 --- linux-3.0.4/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
58031 +++ linux-3.0.4/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
58032 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
58033 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
58034 int (*ack)(struct snd_pcm_substream *substream);
58035 };
58036 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
58037
58038 /*
58039 *
58040 diff -urNp linux-3.0.4/include/sound/sb16_csp.h linux-3.0.4/include/sound/sb16_csp.h
58041 --- linux-3.0.4/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
58042 +++ linux-3.0.4/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
58043 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
58044 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
58045 int (*csp_stop) (struct snd_sb_csp * p);
58046 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
58047 -};
58048 +} __no_const;
58049
58050 /*
58051 * CSP private data
58052 diff -urNp linux-3.0.4/include/sound/soc.h linux-3.0.4/include/sound/soc.h
58053 --- linux-3.0.4/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
58054 +++ linux-3.0.4/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
58055 @@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
58056
58057 /* platform stream ops */
58058 struct snd_pcm_ops *ops;
58059 -};
58060 +} __do_const;
58061
58062 struct snd_soc_platform {
58063 const char *name;
58064 diff -urNp linux-3.0.4/include/sound/ymfpci.h linux-3.0.4/include/sound/ymfpci.h
58065 --- linux-3.0.4/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
58066 +++ linux-3.0.4/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
58067 @@ -358,7 +358,7 @@ struct snd_ymfpci {
58068 spinlock_t reg_lock;
58069 spinlock_t voice_lock;
58070 wait_queue_head_t interrupt_sleep;
58071 - atomic_t interrupt_sleep_count;
58072 + atomic_unchecked_t interrupt_sleep_count;
58073 struct snd_info_entry *proc_entry;
58074 const struct firmware *dsp_microcode;
58075 const struct firmware *controller_microcode;
58076 diff -urNp linux-3.0.4/include/target/target_core_base.h linux-3.0.4/include/target/target_core_base.h
58077 --- linux-3.0.4/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
58078 +++ linux-3.0.4/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
58079 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
58080 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
58081 int (*t10_pr_register)(struct se_cmd *);
58082 int (*t10_pr_clear)(struct se_cmd *);
58083 -};
58084 +} __no_const;
58085
58086 struct t10_reservation_template {
58087 /* Reservation effects all target ports */
58088 @@ -432,8 +432,8 @@ struct se_transport_task {
58089 atomic_t t_task_cdbs_left;
58090 atomic_t t_task_cdbs_ex_left;
58091 atomic_t t_task_cdbs_timeout_left;
58092 - atomic_t t_task_cdbs_sent;
58093 - atomic_t t_transport_aborted;
58094 + atomic_unchecked_t t_task_cdbs_sent;
58095 + atomic_unchecked_t t_transport_aborted;
58096 atomic_t t_transport_active;
58097 atomic_t t_transport_complete;
58098 atomic_t t_transport_queue_active;
58099 @@ -774,7 +774,7 @@ struct se_device {
58100 atomic_t active_cmds;
58101 atomic_t simple_cmds;
58102 atomic_t depth_left;
58103 - atomic_t dev_ordered_id;
58104 + atomic_unchecked_t dev_ordered_id;
58105 atomic_t dev_tur_active;
58106 atomic_t execute_tasks;
58107 atomic_t dev_status_thr_count;
58108 diff -urNp linux-3.0.4/include/trace/events/irq.h linux-3.0.4/include/trace/events/irq.h
58109 --- linux-3.0.4/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
58110 +++ linux-3.0.4/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
58111 @@ -36,7 +36,7 @@ struct softirq_action;
58112 */
58113 TRACE_EVENT(irq_handler_entry,
58114
58115 - TP_PROTO(int irq, struct irqaction *action),
58116 + TP_PROTO(int irq, const struct irqaction *action),
58117
58118 TP_ARGS(irq, action),
58119
58120 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
58121 */
58122 TRACE_EVENT(irq_handler_exit,
58123
58124 - TP_PROTO(int irq, struct irqaction *action, int ret),
58125 + TP_PROTO(int irq, const struct irqaction *action, int ret),
58126
58127 TP_ARGS(irq, action, ret),
58128
58129 diff -urNp linux-3.0.4/include/video/udlfb.h linux-3.0.4/include/video/udlfb.h
58130 --- linux-3.0.4/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
58131 +++ linux-3.0.4/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
58132 @@ -51,10 +51,10 @@ struct dlfb_data {
58133 int base8;
58134 u32 pseudo_palette[256];
58135 /* blit-only rendering path metrics, exposed through sysfs */
58136 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
58137 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
58138 - atomic_t bytes_sent; /* to usb, after compression including overhead */
58139 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
58140 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
58141 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
58142 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
58143 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
58144 };
58145
58146 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
58147 diff -urNp linux-3.0.4/include/video/uvesafb.h linux-3.0.4/include/video/uvesafb.h
58148 --- linux-3.0.4/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
58149 +++ linux-3.0.4/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
58150 @@ -177,6 +177,7 @@ struct uvesafb_par {
58151 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
58152 u8 pmi_setpal; /* PMI for palette changes */
58153 u16 *pmi_base; /* protected mode interface location */
58154 + u8 *pmi_code; /* protected mode code location */
58155 void *pmi_start;
58156 void *pmi_pal;
58157 u8 *vbe_state_orig; /*
58158 diff -urNp linux-3.0.4/init/do_mounts.c linux-3.0.4/init/do_mounts.c
58159 --- linux-3.0.4/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
58160 +++ linux-3.0.4/init/do_mounts.c 2011-08-23 21:47:56.000000000 -0400
58161 @@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
58162
58163 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58164 {
58165 - int err = sys_mount(name, "/root", fs, flags, data);
58166 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58167 if (err)
58168 return err;
58169
58170 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
58171 va_start(args, fmt);
58172 vsprintf(buf, fmt, args);
58173 va_end(args);
58174 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58175 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58176 if (fd >= 0) {
58177 sys_ioctl(fd, FDEJECT, 0);
58178 sys_close(fd);
58179 }
58180 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58181 - fd = sys_open("/dev/console", O_RDWR, 0);
58182 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
58183 if (fd >= 0) {
58184 sys_ioctl(fd, TCGETS, (long)&termios);
58185 termios.c_lflag &= ~ICANON;
58186 sys_ioctl(fd, TCSETSF, (long)&termios);
58187 - sys_read(fd, &c, 1);
58188 + sys_read(fd, (char __user *)&c, 1);
58189 termios.c_lflag |= ICANON;
58190 sys_ioctl(fd, TCSETSF, (long)&termios);
58191 sys_close(fd);
58192 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
58193 mount_root();
58194 out:
58195 devtmpfs_mount("dev");
58196 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58197 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58198 sys_chroot((const char __user __force *)".");
58199 }
58200 diff -urNp linux-3.0.4/init/do_mounts.h linux-3.0.4/init/do_mounts.h
58201 --- linux-3.0.4/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
58202 +++ linux-3.0.4/init/do_mounts.h 2011-08-23 21:47:56.000000000 -0400
58203 @@ -15,15 +15,15 @@ extern int root_mountflags;
58204
58205 static inline int create_dev(char *name, dev_t dev)
58206 {
58207 - sys_unlink(name);
58208 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58209 + sys_unlink((__force char __user *)name);
58210 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58211 }
58212
58213 #if BITS_PER_LONG == 32
58214 static inline u32 bstat(char *name)
58215 {
58216 struct stat64 stat;
58217 - if (sys_stat64(name, &stat) != 0)
58218 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58219 return 0;
58220 if (!S_ISBLK(stat.st_mode))
58221 return 0;
58222 diff -urNp linux-3.0.4/init/do_mounts_initrd.c linux-3.0.4/init/do_mounts_initrd.c
58223 --- linux-3.0.4/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
58224 +++ linux-3.0.4/init/do_mounts_initrd.c 2011-08-23 21:47:56.000000000 -0400
58225 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
58226 create_dev("/dev/root.old", Root_RAM0);
58227 /* mount initrd on rootfs' /root */
58228 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58229 - sys_mkdir("/old", 0700);
58230 - root_fd = sys_open("/", 0, 0);
58231 - old_fd = sys_open("/old", 0, 0);
58232 + sys_mkdir((__force const char __user *)"/old", 0700);
58233 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
58234 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58235 /* move initrd over / and chdir/chroot in initrd root */
58236 - sys_chdir("/root");
58237 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58238 - sys_chroot(".");
58239 + sys_chdir((__force const char __user *)"/root");
58240 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58241 + sys_chroot((__force const char __user *)".");
58242
58243 /*
58244 * In case that a resume from disk is carried out by linuxrc or one of
58245 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
58246
58247 /* move initrd to rootfs' /old */
58248 sys_fchdir(old_fd);
58249 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
58250 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58251 /* switch root and cwd back to / of rootfs */
58252 sys_fchdir(root_fd);
58253 - sys_chroot(".");
58254 + sys_chroot((__force const char __user *)".");
58255 sys_close(old_fd);
58256 sys_close(root_fd);
58257
58258 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58259 - sys_chdir("/old");
58260 + sys_chdir((__force const char __user *)"/old");
58261 return;
58262 }
58263
58264 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
58265 mount_root();
58266
58267 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58268 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58269 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58270 if (!error)
58271 printk("okay\n");
58272 else {
58273 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
58274 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58275 if (error == -ENOENT)
58276 printk("/initrd does not exist. Ignored.\n");
58277 else
58278 printk("failed\n");
58279 printk(KERN_NOTICE "Unmounting old root\n");
58280 - sys_umount("/old", MNT_DETACH);
58281 + sys_umount((__force char __user *)"/old", MNT_DETACH);
58282 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58283 if (fd < 0) {
58284 error = fd;
58285 @@ -116,11 +116,11 @@ int __init initrd_load(void)
58286 * mounted in the normal path.
58287 */
58288 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58289 - sys_unlink("/initrd.image");
58290 + sys_unlink((__force const char __user *)"/initrd.image");
58291 handle_initrd();
58292 return 1;
58293 }
58294 }
58295 - sys_unlink("/initrd.image");
58296 + sys_unlink((__force const char __user *)"/initrd.image");
58297 return 0;
58298 }
58299 diff -urNp linux-3.0.4/init/do_mounts_md.c linux-3.0.4/init/do_mounts_md.c
58300 --- linux-3.0.4/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
58301 +++ linux-3.0.4/init/do_mounts_md.c 2011-08-23 21:47:56.000000000 -0400
58302 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58303 partitioned ? "_d" : "", minor,
58304 md_setup_args[ent].device_names);
58305
58306 - fd = sys_open(name, 0, 0);
58307 + fd = sys_open((__force char __user *)name, 0, 0);
58308 if (fd < 0) {
58309 printk(KERN_ERR "md: open failed - cannot start "
58310 "array %s\n", name);
58311 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58312 * array without it
58313 */
58314 sys_close(fd);
58315 - fd = sys_open(name, 0, 0);
58316 + fd = sys_open((__force char __user *)name, 0, 0);
58317 sys_ioctl(fd, BLKRRPART, 0);
58318 }
58319 sys_close(fd);
58320 diff -urNp linux-3.0.4/init/initramfs.c linux-3.0.4/init/initramfs.c
58321 --- linux-3.0.4/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
58322 +++ linux-3.0.4/init/initramfs.c 2011-08-23 21:47:56.000000000 -0400
58323 @@ -74,7 +74,7 @@ static void __init free_hash(void)
58324 }
58325 }
58326
58327 -static long __init do_utime(char __user *filename, time_t mtime)
58328 +static long __init do_utime(__force char __user *filename, time_t mtime)
58329 {
58330 struct timespec t[2];
58331
58332 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
58333 struct dir_entry *de, *tmp;
58334 list_for_each_entry_safe(de, tmp, &dir_list, list) {
58335 list_del(&de->list);
58336 - do_utime(de->name, de->mtime);
58337 + do_utime((__force char __user *)de->name, de->mtime);
58338 kfree(de->name);
58339 kfree(de);
58340 }
58341 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
58342 if (nlink >= 2) {
58343 char *old = find_link(major, minor, ino, mode, collected);
58344 if (old)
58345 - return (sys_link(old, collected) < 0) ? -1 : 1;
58346 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58347 }
58348 return 0;
58349 }
58350 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
58351 {
58352 struct stat st;
58353
58354 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58355 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58356 if (S_ISDIR(st.st_mode))
58357 - sys_rmdir(path);
58358 + sys_rmdir((__force char __user *)path);
58359 else
58360 - sys_unlink(path);
58361 + sys_unlink((__force char __user *)path);
58362 }
58363 }
58364
58365 @@ -305,7 +305,7 @@ static int __init do_name(void)
58366 int openflags = O_WRONLY|O_CREAT;
58367 if (ml != 1)
58368 openflags |= O_TRUNC;
58369 - wfd = sys_open(collected, openflags, mode);
58370 + wfd = sys_open((__force char __user *)collected, openflags, mode);
58371
58372 if (wfd >= 0) {
58373 sys_fchown(wfd, uid, gid);
58374 @@ -317,17 +317,17 @@ static int __init do_name(void)
58375 }
58376 }
58377 } else if (S_ISDIR(mode)) {
58378 - sys_mkdir(collected, mode);
58379 - sys_chown(collected, uid, gid);
58380 - sys_chmod(collected, mode);
58381 + sys_mkdir((__force char __user *)collected, mode);
58382 + sys_chown((__force char __user *)collected, uid, gid);
58383 + sys_chmod((__force char __user *)collected, mode);
58384 dir_add(collected, mtime);
58385 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58386 S_ISFIFO(mode) || S_ISSOCK(mode)) {
58387 if (maybe_link() == 0) {
58388 - sys_mknod(collected, mode, rdev);
58389 - sys_chown(collected, uid, gid);
58390 - sys_chmod(collected, mode);
58391 - do_utime(collected, mtime);
58392 + sys_mknod((__force char __user *)collected, mode, rdev);
58393 + sys_chown((__force char __user *)collected, uid, gid);
58394 + sys_chmod((__force char __user *)collected, mode);
58395 + do_utime((__force char __user *)collected, mtime);
58396 }
58397 }
58398 return 0;
58399 @@ -336,15 +336,15 @@ static int __init do_name(void)
58400 static int __init do_copy(void)
58401 {
58402 if (count >= body_len) {
58403 - sys_write(wfd, victim, body_len);
58404 + sys_write(wfd, (__force char __user *)victim, body_len);
58405 sys_close(wfd);
58406 - do_utime(vcollected, mtime);
58407 + do_utime((__force char __user *)vcollected, mtime);
58408 kfree(vcollected);
58409 eat(body_len);
58410 state = SkipIt;
58411 return 0;
58412 } else {
58413 - sys_write(wfd, victim, count);
58414 + sys_write(wfd, (__force char __user *)victim, count);
58415 body_len -= count;
58416 eat(count);
58417 return 1;
58418 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
58419 {
58420 collected[N_ALIGN(name_len) + body_len] = '\0';
58421 clean_path(collected, 0);
58422 - sys_symlink(collected + N_ALIGN(name_len), collected);
58423 - sys_lchown(collected, uid, gid);
58424 - do_utime(collected, mtime);
58425 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
58426 + sys_lchown((__force char __user *)collected, uid, gid);
58427 + do_utime((__force char __user *)collected, mtime);
58428 state = SkipIt;
58429 next_state = Reset;
58430 return 0;
58431 diff -urNp linux-3.0.4/init/Kconfig linux-3.0.4/init/Kconfig
58432 --- linux-3.0.4/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
58433 +++ linux-3.0.4/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
58434 @@ -1195,7 +1195,7 @@ config SLUB_DEBUG
58435
58436 config COMPAT_BRK
58437 bool "Disable heap randomization"
58438 - default y
58439 + default n
58440 help
58441 Randomizing heap placement makes heap exploits harder, but it
58442 also breaks ancient binaries (including anything libc5 based).
58443 diff -urNp linux-3.0.4/init/main.c linux-3.0.4/init/main.c
58444 --- linux-3.0.4/init/main.c 2011-07-21 22:17:23.000000000 -0400
58445 +++ linux-3.0.4/init/main.c 2011-08-23 21:48:14.000000000 -0400
58446 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
58447 extern void tc_init(void);
58448 #endif
58449
58450 +extern void grsecurity_init(void);
58451 +
58452 /*
58453 * Debug helper: via this flag we know that we are in 'early bootup code'
58454 * where only the boot processor is running with IRQ disabled. This means
58455 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
58456
58457 __setup("reset_devices", set_reset_devices);
58458
58459 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
58460 +extern char pax_enter_kernel_user[];
58461 +extern char pax_exit_kernel_user[];
58462 +extern pgdval_t clone_pgd_mask;
58463 +#endif
58464 +
58465 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58466 +static int __init setup_pax_nouderef(char *str)
58467 +{
58468 +#ifdef CONFIG_X86_32
58469 + unsigned int cpu;
58470 + struct desc_struct *gdt;
58471 +
58472 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
58473 + gdt = get_cpu_gdt_table(cpu);
58474 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
58475 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
58476 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
58477 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58478 + }
58479 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
58480 +#else
58481 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
58482 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
58483 + clone_pgd_mask = ~(pgdval_t)0UL;
58484 +#endif
58485 +
58486 + return 0;
58487 +}
58488 +early_param("pax_nouderef", setup_pax_nouderef);
58489 +#endif
58490 +
58491 +#ifdef CONFIG_PAX_SOFTMODE
58492 +int pax_softmode;
58493 +
58494 +static int __init setup_pax_softmode(char *str)
58495 +{
58496 + get_option(&str, &pax_softmode);
58497 + return 1;
58498 +}
58499 +__setup("pax_softmode=", setup_pax_softmode);
58500 +#endif
58501 +
58502 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
58503 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58504 static const char *panic_later, *panic_param;
58505 @@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
58506 {
58507 int count = preempt_count();
58508 int ret;
58509 + const char *msg1 = "", *msg2 = "";
58510
58511 if (initcall_debug)
58512 ret = do_one_initcall_debug(fn);
58513 @@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
58514 sprintf(msgbuf, "error code %d ", ret);
58515
58516 if (preempt_count() != count) {
58517 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
58518 + msg1 = " preemption imbalance";
58519 preempt_count() = count;
58520 }
58521 if (irqs_disabled()) {
58522 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
58523 + msg2 = " disabled interrupts";
58524 local_irq_enable();
58525 }
58526 - if (msgbuf[0]) {
58527 - printk("initcall %pF returned with %s\n", fn, msgbuf);
58528 + if (msgbuf[0] || *msg1 || *msg2) {
58529 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
58530 }
58531
58532 return ret;
58533 @@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
58534 do_basic_setup();
58535
58536 /* Open the /dev/console on the rootfs, this should never fail */
58537 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
58538 + if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
58539 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
58540
58541 (void) sys_dup(0);
58542 @@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
58543 if (!ramdisk_execute_command)
58544 ramdisk_execute_command = "/init";
58545
58546 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
58547 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
58548 ramdisk_execute_command = NULL;
58549 prepare_namespace();
58550 }
58551
58552 + grsecurity_init();
58553 +
58554 /*
58555 * Ok, we have completed the initial bootup, and
58556 * we're essentially up and running. Get rid of the
58557 diff -urNp linux-3.0.4/ipc/mqueue.c linux-3.0.4/ipc/mqueue.c
58558 --- linux-3.0.4/ipc/mqueue.c 2011-07-21 22:17:23.000000000 -0400
58559 +++ linux-3.0.4/ipc/mqueue.c 2011-08-23 21:48:14.000000000 -0400
58560 @@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
58561 mq_bytes = (mq_msg_tblsz +
58562 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
58563
58564 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
58565 spin_lock(&mq_lock);
58566 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
58567 u->mq_bytes + mq_bytes >
58568 diff -urNp linux-3.0.4/ipc/msg.c linux-3.0.4/ipc/msg.c
58569 --- linux-3.0.4/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
58570 +++ linux-3.0.4/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
58571 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
58572 return security_msg_queue_associate(msq, msgflg);
58573 }
58574
58575 +static struct ipc_ops msg_ops = {
58576 + .getnew = newque,
58577 + .associate = msg_security,
58578 + .more_checks = NULL
58579 +};
58580 +
58581 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
58582 {
58583 struct ipc_namespace *ns;
58584 - struct ipc_ops msg_ops;
58585 struct ipc_params msg_params;
58586
58587 ns = current->nsproxy->ipc_ns;
58588
58589 - msg_ops.getnew = newque;
58590 - msg_ops.associate = msg_security;
58591 - msg_ops.more_checks = NULL;
58592 -
58593 msg_params.key = key;
58594 msg_params.flg = msgflg;
58595
58596 diff -urNp linux-3.0.4/ipc/sem.c linux-3.0.4/ipc/sem.c
58597 --- linux-3.0.4/ipc/sem.c 2011-08-23 21:44:40.000000000 -0400
58598 +++ linux-3.0.4/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
58599 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
58600 return 0;
58601 }
58602
58603 +static struct ipc_ops sem_ops = {
58604 + .getnew = newary,
58605 + .associate = sem_security,
58606 + .more_checks = sem_more_checks
58607 +};
58608 +
58609 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
58610 {
58611 struct ipc_namespace *ns;
58612 - struct ipc_ops sem_ops;
58613 struct ipc_params sem_params;
58614
58615 ns = current->nsproxy->ipc_ns;
58616 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
58617 if (nsems < 0 || nsems > ns->sc_semmsl)
58618 return -EINVAL;
58619
58620 - sem_ops.getnew = newary;
58621 - sem_ops.associate = sem_security;
58622 - sem_ops.more_checks = sem_more_checks;
58623 -
58624 sem_params.key = key;
58625 sem_params.flg = semflg;
58626 sem_params.u.nsems = nsems;
58627 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
58628 int nsems;
58629 struct list_head tasks;
58630
58631 + pax_track_stack();
58632 +
58633 sma = sem_lock_check(ns, semid);
58634 if (IS_ERR(sma))
58635 return PTR_ERR(sma);
58636 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
58637 struct ipc_namespace *ns;
58638 struct list_head tasks;
58639
58640 + pax_track_stack();
58641 +
58642 ns = current->nsproxy->ipc_ns;
58643
58644 if (nsops < 1 || semid < 0)
58645 diff -urNp linux-3.0.4/ipc/shm.c linux-3.0.4/ipc/shm.c
58646 --- linux-3.0.4/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
58647 +++ linux-3.0.4/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
58648 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
58649 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
58650 #endif
58651
58652 +#ifdef CONFIG_GRKERNSEC
58653 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58654 + const time_t shm_createtime, const uid_t cuid,
58655 + const int shmid);
58656 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58657 + const time_t shm_createtime);
58658 +#endif
58659 +
58660 void shm_init_ns(struct ipc_namespace *ns)
58661 {
58662 ns->shm_ctlmax = SHMMAX;
58663 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
58664 shp->shm_lprid = 0;
58665 shp->shm_atim = shp->shm_dtim = 0;
58666 shp->shm_ctim = get_seconds();
58667 +#ifdef CONFIG_GRKERNSEC
58668 + {
58669 + struct timespec timeval;
58670 + do_posix_clock_monotonic_gettime(&timeval);
58671 +
58672 + shp->shm_createtime = timeval.tv_sec;
58673 + }
58674 +#endif
58675 shp->shm_segsz = size;
58676 shp->shm_nattch = 0;
58677 shp->shm_file = file;
58678 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
58679 return 0;
58680 }
58681
58682 +static struct ipc_ops shm_ops = {
58683 + .getnew = newseg,
58684 + .associate = shm_security,
58685 + .more_checks = shm_more_checks
58686 +};
58687 +
58688 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
58689 {
58690 struct ipc_namespace *ns;
58691 - struct ipc_ops shm_ops;
58692 struct ipc_params shm_params;
58693
58694 ns = current->nsproxy->ipc_ns;
58695
58696 - shm_ops.getnew = newseg;
58697 - shm_ops.associate = shm_security;
58698 - shm_ops.more_checks = shm_more_checks;
58699 -
58700 shm_params.key = key;
58701 shm_params.flg = shmflg;
58702 shm_params.u.size = size;
58703 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
58704 case SHM_LOCK:
58705 case SHM_UNLOCK:
58706 {
58707 - struct file *uninitialized_var(shm_file);
58708 -
58709 lru_add_drain_all(); /* drain pagevecs to lru lists */
58710
58711 shp = shm_lock_check(ns, shmid);
58712 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
58713 if (err)
58714 goto out_unlock;
58715
58716 +#ifdef CONFIG_GRKERNSEC
58717 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
58718 + shp->shm_perm.cuid, shmid) ||
58719 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
58720 + err = -EACCES;
58721 + goto out_unlock;
58722 + }
58723 +#endif
58724 +
58725 path = shp->shm_file->f_path;
58726 path_get(&path);
58727 shp->shm_nattch++;
58728 +#ifdef CONFIG_GRKERNSEC
58729 + shp->shm_lapid = current->pid;
58730 +#endif
58731 size = i_size_read(path.dentry->d_inode);
58732 shm_unlock(shp);
58733
58734 diff -urNp linux-3.0.4/kernel/acct.c linux-3.0.4/kernel/acct.c
58735 --- linux-3.0.4/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
58736 +++ linux-3.0.4/kernel/acct.c 2011-08-23 21:47:56.000000000 -0400
58737 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
58738 */
58739 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
58740 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
58741 - file->f_op->write(file, (char *)&ac,
58742 + file->f_op->write(file, (__force char __user *)&ac,
58743 sizeof(acct_t), &file->f_pos);
58744 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
58745 set_fs(fs);
58746 diff -urNp linux-3.0.4/kernel/audit.c linux-3.0.4/kernel/audit.c
58747 --- linux-3.0.4/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
58748 +++ linux-3.0.4/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
58749 @@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
58750 3) suppressed due to audit_rate_limit
58751 4) suppressed due to audit_backlog_limit
58752 */
58753 -static atomic_t audit_lost = ATOMIC_INIT(0);
58754 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
58755
58756 /* The netlink socket. */
58757 static struct sock *audit_sock;
58758 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
58759 unsigned long now;
58760 int print;
58761
58762 - atomic_inc(&audit_lost);
58763 + atomic_inc_unchecked(&audit_lost);
58764
58765 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
58766
58767 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
58768 printk(KERN_WARNING
58769 "audit: audit_lost=%d audit_rate_limit=%d "
58770 "audit_backlog_limit=%d\n",
58771 - atomic_read(&audit_lost),
58772 + atomic_read_unchecked(&audit_lost),
58773 audit_rate_limit,
58774 audit_backlog_limit);
58775 audit_panic(message);
58776 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
58777 status_set.pid = audit_pid;
58778 status_set.rate_limit = audit_rate_limit;
58779 status_set.backlog_limit = audit_backlog_limit;
58780 - status_set.lost = atomic_read(&audit_lost);
58781 + status_set.lost = atomic_read_unchecked(&audit_lost);
58782 status_set.backlog = skb_queue_len(&audit_skb_queue);
58783 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
58784 &status_set, sizeof(status_set));
58785 diff -urNp linux-3.0.4/kernel/auditsc.c linux-3.0.4/kernel/auditsc.c
58786 --- linux-3.0.4/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
58787 +++ linux-3.0.4/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
58788 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
58789 }
58790
58791 /* global counter which is incremented every time something logs in */
58792 -static atomic_t session_id = ATOMIC_INIT(0);
58793 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
58794
58795 /**
58796 * audit_set_loginuid - set a task's audit_context loginuid
58797 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
58798 */
58799 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
58800 {
58801 - unsigned int sessionid = atomic_inc_return(&session_id);
58802 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
58803 struct audit_context *context = task->audit_context;
58804
58805 if (context && context->in_syscall) {
58806 diff -urNp linux-3.0.4/kernel/capability.c linux-3.0.4/kernel/capability.c
58807 --- linux-3.0.4/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
58808 +++ linux-3.0.4/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
58809 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
58810 * before modification is attempted and the application
58811 * fails.
58812 */
58813 + if (tocopy > ARRAY_SIZE(kdata))
58814 + return -EFAULT;
58815 +
58816 if (copy_to_user(dataptr, kdata, tocopy
58817 * sizeof(struct __user_cap_data_struct))) {
58818 return -EFAULT;
58819 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
58820 BUG();
58821 }
58822
58823 - if (security_capable(ns, current_cred(), cap) == 0) {
58824 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
58825 current->flags |= PF_SUPERPRIV;
58826 return true;
58827 }
58828 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
58829 }
58830 EXPORT_SYMBOL(ns_capable);
58831
58832 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
58833 +{
58834 + if (unlikely(!cap_valid(cap))) {
58835 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
58836 + BUG();
58837 + }
58838 +
58839 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
58840 + current->flags |= PF_SUPERPRIV;
58841 + return true;
58842 + }
58843 + return false;
58844 +}
58845 +EXPORT_SYMBOL(ns_capable_nolog);
58846 +
58847 +bool capable_nolog(int cap)
58848 +{
58849 + return ns_capable_nolog(&init_user_ns, cap);
58850 +}
58851 +EXPORT_SYMBOL(capable_nolog);
58852 +
58853 /**
58854 * task_ns_capable - Determine whether current task has a superior
58855 * capability targeted at a specific task's user namespace.
58856 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
58857 }
58858 EXPORT_SYMBOL(task_ns_capable);
58859
58860 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
58861 +{
58862 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
58863 +}
58864 +EXPORT_SYMBOL(task_ns_capable_nolog);
58865 +
58866 /**
58867 * nsown_capable - Check superior capability to one's own user_ns
58868 * @cap: The capability in question
58869 diff -urNp linux-3.0.4/kernel/cgroup.c linux-3.0.4/kernel/cgroup.c
58870 --- linux-3.0.4/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
58871 +++ linux-3.0.4/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
58872 @@ -593,6 +593,8 @@ static struct css_set *find_css_set(
58873 struct hlist_head *hhead;
58874 struct cg_cgroup_link *link;
58875
58876 + pax_track_stack();
58877 +
58878 /* First see if we already have a cgroup group that matches
58879 * the desired set */
58880 read_lock(&css_set_lock);
58881 diff -urNp linux-3.0.4/kernel/compat.c linux-3.0.4/kernel/compat.c
58882 --- linux-3.0.4/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
58883 +++ linux-3.0.4/kernel/compat.c 2011-08-23 21:48:14.000000000 -0400
58884 @@ -13,6 +13,7 @@
58885
58886 #include <linux/linkage.h>
58887 #include <linux/compat.h>
58888 +#include <linux/module.h>
58889 #include <linux/errno.h>
58890 #include <linux/time.h>
58891 #include <linux/signal.h>
58892 diff -urNp linux-3.0.4/kernel/configs.c linux-3.0.4/kernel/configs.c
58893 --- linux-3.0.4/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
58894 +++ linux-3.0.4/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
58895 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
58896 struct proc_dir_entry *entry;
58897
58898 /* create the current config file */
58899 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
58900 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
58901 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
58902 + &ikconfig_file_ops);
58903 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58904 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
58905 + &ikconfig_file_ops);
58906 +#endif
58907 +#else
58908 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
58909 &ikconfig_file_ops);
58910 +#endif
58911 +
58912 if (!entry)
58913 return -ENOMEM;
58914
58915 diff -urNp linux-3.0.4/kernel/cred.c linux-3.0.4/kernel/cred.c
58916 --- linux-3.0.4/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
58917 +++ linux-3.0.4/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
58918 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
58919 */
58920 void __put_cred(struct cred *cred)
58921 {
58922 + pax_track_stack();
58923 +
58924 kdebug("__put_cred(%p{%d,%d})", cred,
58925 atomic_read(&cred->usage),
58926 read_cred_subscribers(cred));
58927 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
58928 {
58929 struct cred *cred;
58930
58931 + pax_track_stack();
58932 +
58933 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
58934 atomic_read(&tsk->cred->usage),
58935 read_cred_subscribers(tsk->cred));
58936 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
58937 {
58938 const struct cred *cred;
58939
58940 + pax_track_stack();
58941 +
58942 rcu_read_lock();
58943
58944 do {
58945 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
58946 {
58947 struct cred *new;
58948
58949 + pax_track_stack();
58950 +
58951 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
58952 if (!new)
58953 return NULL;
58954 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
58955 const struct cred *old;
58956 struct cred *new;
58957
58958 + pax_track_stack();
58959 +
58960 validate_process_creds();
58961
58962 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58963 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
58964 struct thread_group_cred *tgcred = NULL;
58965 struct cred *new;
58966
58967 + pax_track_stack();
58968 +
58969 #ifdef CONFIG_KEYS
58970 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
58971 if (!tgcred)
58972 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
58973 struct cred *new;
58974 int ret;
58975
58976 + pax_track_stack();
58977 +
58978 if (
58979 #ifdef CONFIG_KEYS
58980 !p->cred->thread_keyring &&
58981 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
58982 struct task_struct *task = current;
58983 const struct cred *old = task->real_cred;
58984
58985 + pax_track_stack();
58986 +
58987 kdebug("commit_creds(%p{%d,%d})", new,
58988 atomic_read(&new->usage),
58989 read_cred_subscribers(new));
58990 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
58991
58992 get_cred(new); /* we will require a ref for the subj creds too */
58993
58994 + gr_set_role_label(task, new->uid, new->gid);
58995 +
58996 /* dumpability changes */
58997 if (old->euid != new->euid ||
58998 old->egid != new->egid ||
58999 @@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
59000 key_fsgid_changed(task);
59001
59002 /* do it
59003 - * - What if a process setreuid()'s and this brings the
59004 - * new uid over his NPROC rlimit? We can check this now
59005 - * cheaply with the new uid cache, so if it matters
59006 - * we should be checking for it. -DaveM
59007 + * RLIMIT_NPROC limits on user->processes have already been checked
59008 + * in set_user().
59009 */
59010 alter_cred_subscribers(new, 2);
59011 if (new->user != old->user)
59012 @@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
59013 */
59014 void abort_creds(struct cred *new)
59015 {
59016 + pax_track_stack();
59017 +
59018 kdebug("abort_creds(%p{%d,%d})", new,
59019 atomic_read(&new->usage),
59020 read_cred_subscribers(new));
59021 @@ -574,6 +592,8 @@ const struct cred *override_creds(const
59022 {
59023 const struct cred *old = current->cred;
59024
59025 + pax_track_stack();
59026 +
59027 kdebug("override_creds(%p{%d,%d})", new,
59028 atomic_read(&new->usage),
59029 read_cred_subscribers(new));
59030 @@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
59031 {
59032 const struct cred *override = current->cred;
59033
59034 + pax_track_stack();
59035 +
59036 kdebug("revert_creds(%p{%d,%d})", old,
59037 atomic_read(&old->usage),
59038 read_cred_subscribers(old));
59039 @@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
59040 const struct cred *old;
59041 struct cred *new;
59042
59043 + pax_track_stack();
59044 +
59045 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59046 if (!new)
59047 return NULL;
59048 @@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
59049 */
59050 int set_security_override(struct cred *new, u32 secid)
59051 {
59052 + pax_track_stack();
59053 +
59054 return security_kernel_act_as(new, secid);
59055 }
59056 EXPORT_SYMBOL(set_security_override);
59057 @@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
59058 u32 secid;
59059 int ret;
59060
59061 + pax_track_stack();
59062 +
59063 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
59064 if (ret < 0)
59065 return ret;
59066 diff -urNp linux-3.0.4/kernel/debug/debug_core.c linux-3.0.4/kernel/debug/debug_core.c
59067 --- linux-3.0.4/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
59068 +++ linux-3.0.4/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
59069 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
59070 */
59071 static atomic_t masters_in_kgdb;
59072 static atomic_t slaves_in_kgdb;
59073 -static atomic_t kgdb_break_tasklet_var;
59074 +static atomic_unchecked_t kgdb_break_tasklet_var;
59075 atomic_t kgdb_setting_breakpoint;
59076
59077 struct task_struct *kgdb_usethread;
59078 @@ -129,7 +129,7 @@ int kgdb_single_step;
59079 static pid_t kgdb_sstep_pid;
59080
59081 /* to keep track of the CPU which is doing the single stepping*/
59082 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59083 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59084
59085 /*
59086 * If you are debugging a problem where roundup (the collection of
59087 @@ -542,7 +542,7 @@ return_normal:
59088 * kernel will only try for the value of sstep_tries before
59089 * giving up and continuing on.
59090 */
59091 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
59092 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
59093 (kgdb_info[cpu].task &&
59094 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
59095 atomic_set(&kgdb_active, -1);
59096 @@ -636,8 +636,8 @@ cpu_master_loop:
59097 }
59098
59099 kgdb_restore:
59100 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
59101 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
59102 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
59103 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
59104 if (kgdb_info[sstep_cpu].task)
59105 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
59106 else
59107 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
59108 static void kgdb_tasklet_bpt(unsigned long ing)
59109 {
59110 kgdb_breakpoint();
59111 - atomic_set(&kgdb_break_tasklet_var, 0);
59112 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
59113 }
59114
59115 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
59116
59117 void kgdb_schedule_breakpoint(void)
59118 {
59119 - if (atomic_read(&kgdb_break_tasklet_var) ||
59120 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
59121 atomic_read(&kgdb_active) != -1 ||
59122 atomic_read(&kgdb_setting_breakpoint))
59123 return;
59124 - atomic_inc(&kgdb_break_tasklet_var);
59125 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
59126 tasklet_schedule(&kgdb_tasklet_breakpoint);
59127 }
59128 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
59129 diff -urNp linux-3.0.4/kernel/debug/kdb/kdb_main.c linux-3.0.4/kernel/debug/kdb/kdb_main.c
59130 --- linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
59131 +++ linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
59132 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
59133 list_for_each_entry(mod, kdb_modules, list) {
59134
59135 kdb_printf("%-20s%8u 0x%p ", mod->name,
59136 - mod->core_size, (void *)mod);
59137 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
59138 #ifdef CONFIG_MODULE_UNLOAD
59139 kdb_printf("%4d ", module_refcount(mod));
59140 #endif
59141 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
59142 kdb_printf(" (Loading)");
59143 else
59144 kdb_printf(" (Live)");
59145 - kdb_printf(" 0x%p", mod->module_core);
59146 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
59147
59148 #ifdef CONFIG_MODULE_UNLOAD
59149 {
59150 diff -urNp linux-3.0.4/kernel/events/core.c linux-3.0.4/kernel/events/core.c
59151 --- linux-3.0.4/kernel/events/core.c 2011-08-23 21:44:40.000000000 -0400
59152 +++ linux-3.0.4/kernel/events/core.c 2011-08-23 21:47:56.000000000 -0400
59153 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
59154 return 0;
59155 }
59156
59157 -static atomic64_t perf_event_id;
59158 +static atomic64_unchecked_t perf_event_id;
59159
59160 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
59161 enum event_type_t event_type);
59162 @@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
59163
59164 static inline u64 perf_event_count(struct perf_event *event)
59165 {
59166 - return local64_read(&event->count) + atomic64_read(&event->child_count);
59167 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
59168 }
59169
59170 static u64 perf_event_read(struct perf_event *event)
59171 @@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
59172 mutex_lock(&event->child_mutex);
59173 total += perf_event_read(event);
59174 *enabled += event->total_time_enabled +
59175 - atomic64_read(&event->child_total_time_enabled);
59176 + atomic64_read_unchecked(&event->child_total_time_enabled);
59177 *running += event->total_time_running +
59178 - atomic64_read(&event->child_total_time_running);
59179 + atomic64_read_unchecked(&event->child_total_time_running);
59180
59181 list_for_each_entry(child, &event->child_list, child_list) {
59182 total += perf_event_read(child);
59183 @@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
59184 userpg->offset -= local64_read(&event->hw.prev_count);
59185
59186 userpg->time_enabled = event->total_time_enabled +
59187 - atomic64_read(&event->child_total_time_enabled);
59188 + atomic64_read_unchecked(&event->child_total_time_enabled);
59189
59190 userpg->time_running = event->total_time_running +
59191 - atomic64_read(&event->child_total_time_running);
59192 + atomic64_read_unchecked(&event->child_total_time_running);
59193
59194 barrier();
59195 ++userpg->lock;
59196 @@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
59197 values[n++] = perf_event_count(event);
59198 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
59199 values[n++] = enabled +
59200 - atomic64_read(&event->child_total_time_enabled);
59201 + atomic64_read_unchecked(&event->child_total_time_enabled);
59202 }
59203 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
59204 values[n++] = running +
59205 - atomic64_read(&event->child_total_time_running);
59206 + atomic64_read_unchecked(&event->child_total_time_running);
59207 }
59208 if (read_format & PERF_FORMAT_ID)
59209 values[n++] = primary_event_id(event);
59210 @@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
59211 event->parent = parent_event;
59212
59213 event->ns = get_pid_ns(current->nsproxy->pid_ns);
59214 - event->id = atomic64_inc_return(&perf_event_id);
59215 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
59216
59217 event->state = PERF_EVENT_STATE_INACTIVE;
59218
59219 @@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
59220 /*
59221 * Add back the child's count to the parent's count:
59222 */
59223 - atomic64_add(child_val, &parent_event->child_count);
59224 - atomic64_add(child_event->total_time_enabled,
59225 + atomic64_add_unchecked(child_val, &parent_event->child_count);
59226 + atomic64_add_unchecked(child_event->total_time_enabled,
59227 &parent_event->child_total_time_enabled);
59228 - atomic64_add(child_event->total_time_running,
59229 + atomic64_add_unchecked(child_event->total_time_running,
59230 &parent_event->child_total_time_running);
59231
59232 /*
59233 diff -urNp linux-3.0.4/kernel/exit.c linux-3.0.4/kernel/exit.c
59234 --- linux-3.0.4/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
59235 +++ linux-3.0.4/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
59236 @@ -57,6 +57,10 @@
59237 #include <asm/pgtable.h>
59238 #include <asm/mmu_context.h>
59239
59240 +#ifdef CONFIG_GRKERNSEC
59241 +extern rwlock_t grsec_exec_file_lock;
59242 +#endif
59243 +
59244 static void exit_mm(struct task_struct * tsk);
59245
59246 static void __unhash_process(struct task_struct *p, bool group_dead)
59247 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p
59248 struct task_struct *leader;
59249 int zap_leader;
59250 repeat:
59251 +#ifdef CONFIG_NET
59252 + gr_del_task_from_ip_table(p);
59253 +#endif
59254 +
59255 tracehook_prepare_release_task(p);
59256 /* don't need to get the RCU readlock here - the process is dead and
59257 * can't be modifying its own credentials. But shut RCU-lockdep up */
59258 @@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
59259 {
59260 write_lock_irq(&tasklist_lock);
59261
59262 +#ifdef CONFIG_GRKERNSEC
59263 + write_lock(&grsec_exec_file_lock);
59264 + if (current->exec_file) {
59265 + fput(current->exec_file);
59266 + current->exec_file = NULL;
59267 + }
59268 + write_unlock(&grsec_exec_file_lock);
59269 +#endif
59270 +
59271 ptrace_unlink(current);
59272 /* Reparent to init */
59273 current->real_parent = current->parent = kthreadd_task;
59274 list_move_tail(&current->sibling, &current->real_parent->children);
59275
59276 + gr_set_kernel_label(current);
59277 +
59278 /* Set the exit signal to SIGCHLD so we signal init on exit */
59279 current->exit_signal = SIGCHLD;
59280
59281 @@ -394,7 +413,7 @@ int allow_signal(int sig)
59282 * know it'll be handled, so that they don't get converted to
59283 * SIGKILL or just silently dropped.
59284 */
59285 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
59286 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
59287 recalc_sigpending();
59288 spin_unlock_irq(&current->sighand->siglock);
59289 return 0;
59290 @@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
59291 vsnprintf(current->comm, sizeof(current->comm), name, args);
59292 va_end(args);
59293
59294 +#ifdef CONFIG_GRKERNSEC
59295 + write_lock(&grsec_exec_file_lock);
59296 + if (current->exec_file) {
59297 + fput(current->exec_file);
59298 + current->exec_file = NULL;
59299 + }
59300 + write_unlock(&grsec_exec_file_lock);
59301 +#endif
59302 +
59303 + gr_set_kernel_label(current);
59304 +
59305 /*
59306 * If we were started as result of loading a module, close all of the
59307 * user space pages. We don't need them, and if we didn't close them
59308 @@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
59309 struct task_struct *tsk = current;
59310 int group_dead;
59311
59312 - profile_task_exit(tsk);
59313 -
59314 - WARN_ON(atomic_read(&tsk->fs_excl));
59315 - WARN_ON(blk_needs_flush_plug(tsk));
59316 -
59317 if (unlikely(in_interrupt()))
59318 panic("Aiee, killing interrupt handler!");
59319 - if (unlikely(!tsk->pid))
59320 - panic("Attempted to kill the idle task!");
59321
59322 /*
59323 * If do_exit is called because this processes oopsed, it's possible
59324 @@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
59325 */
59326 set_fs(USER_DS);
59327
59328 + profile_task_exit(tsk);
59329 +
59330 + WARN_ON(atomic_read(&tsk->fs_excl));
59331 + WARN_ON(blk_needs_flush_plug(tsk));
59332 +
59333 + if (unlikely(!tsk->pid))
59334 + panic("Attempted to kill the idle task!");
59335 +
59336 tracehook_report_exit(&code);
59337
59338 validate_creds_for_do_exit(tsk);
59339 @@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
59340 tsk->exit_code = code;
59341 taskstats_exit(tsk, group_dead);
59342
59343 + gr_acl_handle_psacct(tsk, code);
59344 + gr_acl_handle_exit();
59345 +
59346 exit_mm(tsk);
59347
59348 if (group_dead)
59349 diff -urNp linux-3.0.4/kernel/fork.c linux-3.0.4/kernel/fork.c
59350 --- linux-3.0.4/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
59351 +++ linux-3.0.4/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
59352 @@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
59353 *stackend = STACK_END_MAGIC; /* for overflow detection */
59354
59355 #ifdef CONFIG_CC_STACKPROTECTOR
59356 - tsk->stack_canary = get_random_int();
59357 + tsk->stack_canary = pax_get_random_long();
59358 #endif
59359
59360 /* One for us, one for whoever does the "release_task()" (usually parent) */
59361 @@ -308,13 +308,77 @@ out:
59362 }
59363
59364 #ifdef CONFIG_MMU
59365 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
59366 +{
59367 + struct vm_area_struct *tmp;
59368 + unsigned long charge;
59369 + struct mempolicy *pol;
59370 + struct file *file;
59371 +
59372 + charge = 0;
59373 + if (mpnt->vm_flags & VM_ACCOUNT) {
59374 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
59375 + if (security_vm_enough_memory(len))
59376 + goto fail_nomem;
59377 + charge = len;
59378 + }
59379 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
59380 + if (!tmp)
59381 + goto fail_nomem;
59382 + *tmp = *mpnt;
59383 + tmp->vm_mm = mm;
59384 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
59385 + pol = mpol_dup(vma_policy(mpnt));
59386 + if (IS_ERR(pol))
59387 + goto fail_nomem_policy;
59388 + vma_set_policy(tmp, pol);
59389 + if (anon_vma_fork(tmp, mpnt))
59390 + goto fail_nomem_anon_vma_fork;
59391 + tmp->vm_flags &= ~VM_LOCKED;
59392 + tmp->vm_next = tmp->vm_prev = NULL;
59393 + tmp->vm_mirror = NULL;
59394 + file = tmp->vm_file;
59395 + if (file) {
59396 + struct inode *inode = file->f_path.dentry->d_inode;
59397 + struct address_space *mapping = file->f_mapping;
59398 +
59399 + get_file(file);
59400 + if (tmp->vm_flags & VM_DENYWRITE)
59401 + atomic_dec(&inode->i_writecount);
59402 + mutex_lock(&mapping->i_mmap_mutex);
59403 + if (tmp->vm_flags & VM_SHARED)
59404 + mapping->i_mmap_writable++;
59405 + flush_dcache_mmap_lock(mapping);
59406 + /* insert tmp into the share list, just after mpnt */
59407 + vma_prio_tree_add(tmp, mpnt);
59408 + flush_dcache_mmap_unlock(mapping);
59409 + mutex_unlock(&mapping->i_mmap_mutex);
59410 + }
59411 +
59412 + /*
59413 + * Clear hugetlb-related page reserves for children. This only
59414 + * affects MAP_PRIVATE mappings. Faults generated by the child
59415 + * are not guaranteed to succeed, even if read-only
59416 + */
59417 + if (is_vm_hugetlb_page(tmp))
59418 + reset_vma_resv_huge_pages(tmp);
59419 +
59420 + return tmp;
59421 +
59422 +fail_nomem_anon_vma_fork:
59423 + mpol_put(pol);
59424 +fail_nomem_policy:
59425 + kmem_cache_free(vm_area_cachep, tmp);
59426 +fail_nomem:
59427 + vm_unacct_memory(charge);
59428 + return NULL;
59429 +}
59430 +
59431 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
59432 {
59433 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
59434 struct rb_node **rb_link, *rb_parent;
59435 int retval;
59436 - unsigned long charge;
59437 - struct mempolicy *pol;
59438
59439 down_write(&oldmm->mmap_sem);
59440 flush_cache_dup_mm(oldmm);
59441 @@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
59442 mm->locked_vm = 0;
59443 mm->mmap = NULL;
59444 mm->mmap_cache = NULL;
59445 - mm->free_area_cache = oldmm->mmap_base;
59446 - mm->cached_hole_size = ~0UL;
59447 + mm->free_area_cache = oldmm->free_area_cache;
59448 + mm->cached_hole_size = oldmm->cached_hole_size;
59449 mm->map_count = 0;
59450 cpumask_clear(mm_cpumask(mm));
59451 mm->mm_rb = RB_ROOT;
59452 @@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
59453
59454 prev = NULL;
59455 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
59456 - struct file *file;
59457 -
59458 if (mpnt->vm_flags & VM_DONTCOPY) {
59459 long pages = vma_pages(mpnt);
59460 mm->total_vm -= pages;
59461 @@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
59462 -pages);
59463 continue;
59464 }
59465 - charge = 0;
59466 - if (mpnt->vm_flags & VM_ACCOUNT) {
59467 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
59468 - if (security_vm_enough_memory(len))
59469 - goto fail_nomem;
59470 - charge = len;
59471 - }
59472 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
59473 - if (!tmp)
59474 - goto fail_nomem;
59475 - *tmp = *mpnt;
59476 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
59477 - pol = mpol_dup(vma_policy(mpnt));
59478 - retval = PTR_ERR(pol);
59479 - if (IS_ERR(pol))
59480 - goto fail_nomem_policy;
59481 - vma_set_policy(tmp, pol);
59482 - tmp->vm_mm = mm;
59483 - if (anon_vma_fork(tmp, mpnt))
59484 - goto fail_nomem_anon_vma_fork;
59485 - tmp->vm_flags &= ~VM_LOCKED;
59486 - tmp->vm_next = tmp->vm_prev = NULL;
59487 - file = tmp->vm_file;
59488 - if (file) {
59489 - struct inode *inode = file->f_path.dentry->d_inode;
59490 - struct address_space *mapping = file->f_mapping;
59491 -
59492 - get_file(file);
59493 - if (tmp->vm_flags & VM_DENYWRITE)
59494 - atomic_dec(&inode->i_writecount);
59495 - mutex_lock(&mapping->i_mmap_mutex);
59496 - if (tmp->vm_flags & VM_SHARED)
59497 - mapping->i_mmap_writable++;
59498 - flush_dcache_mmap_lock(mapping);
59499 - /* insert tmp into the share list, just after mpnt */
59500 - vma_prio_tree_add(tmp, mpnt);
59501 - flush_dcache_mmap_unlock(mapping);
59502 - mutex_unlock(&mapping->i_mmap_mutex);
59503 + tmp = dup_vma(mm, mpnt);
59504 + if (!tmp) {
59505 + retval = -ENOMEM;
59506 + goto out;
59507 }
59508
59509 /*
59510 - * Clear hugetlb-related page reserves for children. This only
59511 - * affects MAP_PRIVATE mappings. Faults generated by the child
59512 - * are not guaranteed to succeed, even if read-only
59513 - */
59514 - if (is_vm_hugetlb_page(tmp))
59515 - reset_vma_resv_huge_pages(tmp);
59516 -
59517 - /*
59518 * Link in the new vma and copy the page table entries.
59519 */
59520 *pprev = tmp;
59521 @@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
59522 if (retval)
59523 goto out;
59524 }
59525 +
59526 +#ifdef CONFIG_PAX_SEGMEXEC
59527 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
59528 + struct vm_area_struct *mpnt_m;
59529 +
59530 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
59531 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
59532 +
59533 + if (!mpnt->vm_mirror)
59534 + continue;
59535 +
59536 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
59537 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
59538 + mpnt->vm_mirror = mpnt_m;
59539 + } else {
59540 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
59541 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
59542 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
59543 + mpnt->vm_mirror->vm_mirror = mpnt;
59544 + }
59545 + }
59546 + BUG_ON(mpnt_m);
59547 + }
59548 +#endif
59549 +
59550 /* a new mm has just been created */
59551 arch_dup_mmap(oldmm, mm);
59552 retval = 0;
59553 @@ -429,14 +474,6 @@ out:
59554 flush_tlb_mm(oldmm);
59555 up_write(&oldmm->mmap_sem);
59556 return retval;
59557 -fail_nomem_anon_vma_fork:
59558 - mpol_put(pol);
59559 -fail_nomem_policy:
59560 - kmem_cache_free(vm_area_cachep, tmp);
59561 -fail_nomem:
59562 - retval = -ENOMEM;
59563 - vm_unacct_memory(charge);
59564 - goto out;
59565 }
59566
59567 static inline int mm_alloc_pgd(struct mm_struct * mm)
59568 @@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
59569 spin_unlock(&fs->lock);
59570 return -EAGAIN;
59571 }
59572 - fs->users++;
59573 + atomic_inc(&fs->users);
59574 spin_unlock(&fs->lock);
59575 return 0;
59576 }
59577 tsk->fs = copy_fs_struct(fs);
59578 if (!tsk->fs)
59579 return -ENOMEM;
59580 + gr_set_chroot_entries(tsk, &tsk->fs->root);
59581 return 0;
59582 }
59583
59584 @@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
59585 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
59586 #endif
59587 retval = -EAGAIN;
59588 +
59589 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59590 +
59591 if (atomic_read(&p->real_cred->user->processes) >=
59592 task_rlimit(p, RLIMIT_NPROC)) {
59593 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59594 - p->real_cred->user != INIT_USER)
59595 + if (p->real_cred->user != INIT_USER &&
59596 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
59597 goto bad_fork_free;
59598 }
59599 + current->flags &= ~PF_NPROC_EXCEEDED;
59600
59601 retval = copy_creds(p, clone_flags);
59602 if (retval < 0)
59603 @@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
59604 if (clone_flags & CLONE_THREAD)
59605 p->tgid = current->tgid;
59606
59607 + gr_copy_label(p);
59608 +
59609 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59610 /*
59611 * Clear TID on mm_release()?
59612 @@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
59613 bad_fork_free:
59614 free_task(p);
59615 fork_out:
59616 + gr_log_forkfail(retval);
59617 +
59618 return ERR_PTR(retval);
59619 }
59620
59621 @@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
59622 if (clone_flags & CLONE_PARENT_SETTID)
59623 put_user(nr, parent_tidptr);
59624
59625 + gr_handle_brute_check();
59626 +
59627 if (clone_flags & CLONE_VFORK) {
59628 p->vfork_done = &vfork;
59629 init_completion(&vfork);
59630 @@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
59631 return 0;
59632
59633 /* don't need lock here; in the worst case we'll do useless copy */
59634 - if (fs->users == 1)
59635 + if (atomic_read(&fs->users) == 1)
59636 return 0;
59637
59638 *new_fsp = copy_fs_struct(fs);
59639 @@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
59640 fs = current->fs;
59641 spin_lock(&fs->lock);
59642 current->fs = new_fs;
59643 - if (--fs->users)
59644 + gr_set_chroot_entries(current, &current->fs->root);
59645 + if (atomic_dec_return(&fs->users))
59646 new_fs = NULL;
59647 else
59648 new_fs = fs;
59649 diff -urNp linux-3.0.4/kernel/futex.c linux-3.0.4/kernel/futex.c
59650 --- linux-3.0.4/kernel/futex.c 2011-08-23 21:44:40.000000000 -0400
59651 +++ linux-3.0.4/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
59652 @@ -54,6 +54,7 @@
59653 #include <linux/mount.h>
59654 #include <linux/pagemap.h>
59655 #include <linux/syscalls.h>
59656 +#include <linux/ptrace.h>
59657 #include <linux/signal.h>
59658 #include <linux/module.h>
59659 #include <linux/magic.h>
59660 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59661 struct page *page, *page_head;
59662 int err, ro = 0;
59663
59664 +#ifdef CONFIG_PAX_SEGMEXEC
59665 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59666 + return -EFAULT;
59667 +#endif
59668 +
59669 /*
59670 * The futex address must be "naturally" aligned.
59671 */
59672 @@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
59673 struct futex_q q = futex_q_init;
59674 int ret;
59675
59676 + pax_track_stack();
59677 +
59678 if (!bitset)
59679 return -EINVAL;
59680 q.bitset = bitset;
59681 @@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
59682 struct futex_q q = futex_q_init;
59683 int res, ret;
59684
59685 + pax_track_stack();
59686 +
59687 if (!bitset)
59688 return -EINVAL;
59689
59690 @@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59691 {
59692 struct robust_list_head __user *head;
59693 unsigned long ret;
59694 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59695 const struct cred *cred = current_cred(), *pcred;
59696 +#endif
59697
59698 if (!futex_cmpxchg_enabled)
59699 return -ENOSYS;
59700 @@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59701 if (!p)
59702 goto err_unlock;
59703 ret = -EPERM;
59704 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59705 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59706 + goto err_unlock;
59707 +#else
59708 pcred = __task_cred(p);
59709 /* If victim is in different user_ns, then uids are not
59710 comparable, so we must have CAP_SYS_PTRACE */
59711 @@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59712 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
59713 goto err_unlock;
59714 ok:
59715 +#endif
59716 head = p->robust_list;
59717 rcu_read_unlock();
59718 }
59719 @@ -2712,6 +2729,7 @@ static int __init futex_init(void)
59720 {
59721 u32 curval;
59722 int i;
59723 + mm_segment_t oldfs;
59724
59725 /*
59726 * This will fail and we want it. Some arch implementations do
59727 @@ -2723,8 +2741,11 @@ static int __init futex_init(void)
59728 * implementation, the non-functional ones will return
59729 * -ENOSYS.
59730 */
59731 + oldfs = get_fs();
59732 + set_fs(USER_DS);
59733 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
59734 futex_cmpxchg_enabled = 1;
59735 + set_fs(oldfs);
59736
59737 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
59738 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
59739 diff -urNp linux-3.0.4/kernel/futex_compat.c linux-3.0.4/kernel/futex_compat.c
59740 --- linux-3.0.4/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
59741 +++ linux-3.0.4/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
59742 @@ -10,6 +10,7 @@
59743 #include <linux/compat.h>
59744 #include <linux/nsproxy.h>
59745 #include <linux/futex.h>
59746 +#include <linux/ptrace.h>
59747
59748 #include <asm/uaccess.h>
59749
59750 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
59751 {
59752 struct compat_robust_list_head __user *head;
59753 unsigned long ret;
59754 - const struct cred *cred = current_cred(), *pcred;
59755 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59756 + const struct cred *cred = current_cred();
59757 + const struct cred *pcred;
59758 +#endif
59759
59760 if (!futex_cmpxchg_enabled)
59761 return -ENOSYS;
59762 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
59763 if (!p)
59764 goto err_unlock;
59765 ret = -EPERM;
59766 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59767 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59768 + goto err_unlock;
59769 +#else
59770 pcred = __task_cred(p);
59771 /* If victim is in different user_ns, then uids are not
59772 comparable, so we must have CAP_SYS_PTRACE */
59773 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
59774 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
59775 goto err_unlock;
59776 ok:
59777 +#endif
59778 head = p->compat_robust_list;
59779 rcu_read_unlock();
59780 }
59781 diff -urNp linux-3.0.4/kernel/gcov/base.c linux-3.0.4/kernel/gcov/base.c
59782 --- linux-3.0.4/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
59783 +++ linux-3.0.4/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
59784 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
59785 }
59786
59787 #ifdef CONFIG_MODULES
59788 -static inline int within(void *addr, void *start, unsigned long size)
59789 -{
59790 - return ((addr >= start) && (addr < start + size));
59791 -}
59792 -
59793 /* Update list and generate events when modules are unloaded. */
59794 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
59795 void *data)
59796 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
59797 prev = NULL;
59798 /* Remove entries located in module from linked list. */
59799 for (info = gcov_info_head; info; info = info->next) {
59800 - if (within(info, mod->module_core, mod->core_size)) {
59801 + if (within_module_core_rw((unsigned long)info, mod)) {
59802 if (prev)
59803 prev->next = info->next;
59804 else
59805 diff -urNp linux-3.0.4/kernel/hrtimer.c linux-3.0.4/kernel/hrtimer.c
59806 --- linux-3.0.4/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
59807 +++ linux-3.0.4/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
59808 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
59809 local_irq_restore(flags);
59810 }
59811
59812 -static void run_hrtimer_softirq(struct softirq_action *h)
59813 +static void run_hrtimer_softirq(void)
59814 {
59815 hrtimer_peek_ahead_timers();
59816 }
59817 diff -urNp linux-3.0.4/kernel/jump_label.c linux-3.0.4/kernel/jump_label.c
59818 --- linux-3.0.4/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
59819 +++ linux-3.0.4/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
59820 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
59821
59822 size = (((unsigned long)stop - (unsigned long)start)
59823 / sizeof(struct jump_entry));
59824 + pax_open_kernel();
59825 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
59826 + pax_close_kernel();
59827 }
59828
59829 static void jump_label_update(struct jump_label_key *key, int enable);
59830 @@ -297,10 +299,12 @@ static void jump_label_invalidate_module
59831 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
59832 struct jump_entry *iter;
59833
59834 + pax_open_kernel();
59835 for (iter = iter_start; iter < iter_stop; iter++) {
59836 if (within_module_init(iter->code, mod))
59837 iter->code = 0;
59838 }
59839 + pax_close_kernel();
59840 }
59841
59842 static int
59843 diff -urNp linux-3.0.4/kernel/kallsyms.c linux-3.0.4/kernel/kallsyms.c
59844 --- linux-3.0.4/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
59845 +++ linux-3.0.4/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
59846 @@ -11,6 +11,9 @@
59847 * Changed the compression method from stem compression to "table lookup"
59848 * compression (see scripts/kallsyms.c for a more complete description)
59849 */
59850 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59851 +#define __INCLUDED_BY_HIDESYM 1
59852 +#endif
59853 #include <linux/kallsyms.h>
59854 #include <linux/module.h>
59855 #include <linux/init.h>
59856 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
59857
59858 static inline int is_kernel_inittext(unsigned long addr)
59859 {
59860 + if (system_state != SYSTEM_BOOTING)
59861 + return 0;
59862 +
59863 if (addr >= (unsigned long)_sinittext
59864 && addr <= (unsigned long)_einittext)
59865 return 1;
59866 return 0;
59867 }
59868
59869 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59870 +#ifdef CONFIG_MODULES
59871 +static inline int is_module_text(unsigned long addr)
59872 +{
59873 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
59874 + return 1;
59875 +
59876 + addr = ktla_ktva(addr);
59877 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
59878 +}
59879 +#else
59880 +static inline int is_module_text(unsigned long addr)
59881 +{
59882 + return 0;
59883 +}
59884 +#endif
59885 +#endif
59886 +
59887 static inline int is_kernel_text(unsigned long addr)
59888 {
59889 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
59890 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
59891
59892 static inline int is_kernel(unsigned long addr)
59893 {
59894 +
59895 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59896 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
59897 + return 1;
59898 +
59899 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
59900 +#else
59901 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
59902 +#endif
59903 +
59904 return 1;
59905 return in_gate_area_no_mm(addr);
59906 }
59907
59908 static int is_ksym_addr(unsigned long addr)
59909 {
59910 +
59911 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59912 + if (is_module_text(addr))
59913 + return 0;
59914 +#endif
59915 +
59916 if (all_var)
59917 return is_kernel(addr);
59918
59919 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
59920
59921 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
59922 {
59923 - iter->name[0] = '\0';
59924 iter->nameoff = get_symbol_offset(new_pos);
59925 iter->pos = new_pos;
59926 }
59927 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
59928 {
59929 struct kallsym_iter *iter = m->private;
59930
59931 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59932 + if (current_uid())
59933 + return 0;
59934 +#endif
59935 +
59936 /* Some debugging symbols have no name. Ignore them. */
59937 if (!iter->name[0])
59938 return 0;
59939 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
59940 struct kallsym_iter *iter;
59941 int ret;
59942
59943 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
59944 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
59945 if (!iter)
59946 return -ENOMEM;
59947 reset_iter(iter, 0);
59948 diff -urNp linux-3.0.4/kernel/kmod.c linux-3.0.4/kernel/kmod.c
59949 --- linux-3.0.4/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
59950 +++ linux-3.0.4/kernel/kmod.c 2011-08-23 21:48:14.000000000 -0400
59951 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
59952 * If module auto-loading support is disabled then this function
59953 * becomes a no-operation.
59954 */
59955 -int __request_module(bool wait, const char *fmt, ...)
59956 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
59957 {
59958 - va_list args;
59959 char module_name[MODULE_NAME_LEN];
59960 unsigned int max_modprobes;
59961 int ret;
59962 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
59963 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
59964 static char *envp[] = { "HOME=/",
59965 "TERM=linux",
59966 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
59967 @@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
59968 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
59969 static int kmod_loop_msg;
59970
59971 - va_start(args, fmt);
59972 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
59973 - va_end(args);
59974 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
59975 if (ret >= MODULE_NAME_LEN)
59976 return -ENAMETOOLONG;
59977
59978 @@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
59979 if (ret)
59980 return ret;
59981
59982 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59983 + if (!current_uid()) {
59984 + /* hack to workaround consolekit/udisks stupidity */
59985 + read_lock(&tasklist_lock);
59986 + if (!strcmp(current->comm, "mount") &&
59987 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
59988 + read_unlock(&tasklist_lock);
59989 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
59990 + return -EPERM;
59991 + }
59992 + read_unlock(&tasklist_lock);
59993 + }
59994 +#endif
59995 +
59996 /* If modprobe needs a service that is in a module, we get a recursive
59997 * loop. Limit the number of running kmod threads to max_threads/2 or
59998 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
59999 @@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
60000 atomic_dec(&kmod_concurrent);
60001 return ret;
60002 }
60003 +
60004 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
60005 +{
60006 + va_list args;
60007 + int ret;
60008 +
60009 + va_start(args, fmt);
60010 + ret = ____request_module(wait, module_param, fmt, args);
60011 + va_end(args);
60012 +
60013 + return ret;
60014 +}
60015 +
60016 +int __request_module(bool wait, const char *fmt, ...)
60017 +{
60018 + va_list args;
60019 + int ret;
60020 +
60021 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60022 + if (current_uid()) {
60023 + char module_param[MODULE_NAME_LEN];
60024 +
60025 + memset(module_param, 0, sizeof(module_param));
60026 +
60027 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
60028 +
60029 + va_start(args, fmt);
60030 + ret = ____request_module(wait, module_param, fmt, args);
60031 + va_end(args);
60032 +
60033 + return ret;
60034 + }
60035 +#endif
60036 +
60037 + va_start(args, fmt);
60038 + ret = ____request_module(wait, NULL, fmt, args);
60039 + va_end(args);
60040 +
60041 + return ret;
60042 +}
60043 +
60044 EXPORT_SYMBOL(__request_module);
60045 #endif /* CONFIG_MODULES */
60046
60047 diff -urNp linux-3.0.4/kernel/kprobes.c linux-3.0.4/kernel/kprobes.c
60048 --- linux-3.0.4/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
60049 +++ linux-3.0.4/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
60050 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
60051 * kernel image and loaded module images reside. This is required
60052 * so x86_64 can correctly handle the %rip-relative fixups.
60053 */
60054 - kip->insns = module_alloc(PAGE_SIZE);
60055 + kip->insns = module_alloc_exec(PAGE_SIZE);
60056 if (!kip->insns) {
60057 kfree(kip);
60058 return NULL;
60059 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
60060 */
60061 if (!list_is_singular(&kip->list)) {
60062 list_del(&kip->list);
60063 - module_free(NULL, kip->insns);
60064 + module_free_exec(NULL, kip->insns);
60065 kfree(kip);
60066 }
60067 return 1;
60068 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
60069 {
60070 int i, err = 0;
60071 unsigned long offset = 0, size = 0;
60072 - char *modname, namebuf[128];
60073 + char *modname, namebuf[KSYM_NAME_LEN];
60074 const char *symbol_name;
60075 void *addr;
60076 struct kprobe_blackpoint *kb;
60077 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
60078 const char *sym = NULL;
60079 unsigned int i = *(loff_t *) v;
60080 unsigned long offset = 0;
60081 - char *modname, namebuf[128];
60082 + char *modname, namebuf[KSYM_NAME_LEN];
60083
60084 head = &kprobe_table[i];
60085 preempt_disable();
60086 diff -urNp linux-3.0.4/kernel/lockdep.c linux-3.0.4/kernel/lockdep.c
60087 --- linux-3.0.4/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
60088 +++ linux-3.0.4/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
60089 @@ -583,6 +583,10 @@ static int static_obj(void *obj)
60090 end = (unsigned long) &_end,
60091 addr = (unsigned long) obj;
60092
60093 +#ifdef CONFIG_PAX_KERNEXEC
60094 + start = ktla_ktva(start);
60095 +#endif
60096 +
60097 /*
60098 * static variable?
60099 */
60100 @@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
60101 if (!static_obj(lock->key)) {
60102 debug_locks_off();
60103 printk("INFO: trying to register non-static key.\n");
60104 + printk("lock:%pS key:%pS.\n", lock, lock->key);
60105 printk("the code is fine but needs lockdep annotation.\n");
60106 printk("turning off the locking correctness validator.\n");
60107 dump_stack();
60108 @@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
60109 if (!class)
60110 return 0;
60111 }
60112 - atomic_inc((atomic_t *)&class->ops);
60113 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
60114 if (very_verbose(class)) {
60115 printk("\nacquire class [%p] %s", class->key, class->name);
60116 if (class->name_version > 1)
60117 diff -urNp linux-3.0.4/kernel/lockdep_proc.c linux-3.0.4/kernel/lockdep_proc.c
60118 --- linux-3.0.4/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
60119 +++ linux-3.0.4/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
60120 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
60121
60122 static void print_name(struct seq_file *m, struct lock_class *class)
60123 {
60124 - char str[128];
60125 + char str[KSYM_NAME_LEN];
60126 const char *name = class->name;
60127
60128 if (!name) {
60129 diff -urNp linux-3.0.4/kernel/module.c linux-3.0.4/kernel/module.c
60130 --- linux-3.0.4/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
60131 +++ linux-3.0.4/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
60132 @@ -58,6 +58,7 @@
60133 #include <linux/jump_label.h>
60134 #include <linux/pfn.h>
60135 #include <linux/bsearch.h>
60136 +#include <linux/grsecurity.h>
60137
60138 #define CREATE_TRACE_POINTS
60139 #include <trace/events/module.h>
60140 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
60141
60142 /* Bounds of module allocation, for speeding __module_address.
60143 * Protected by module_mutex. */
60144 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
60145 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
60146 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
60147
60148 int register_module_notifier(struct notifier_block * nb)
60149 {
60150 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
60151 return true;
60152
60153 list_for_each_entry_rcu(mod, &modules, list) {
60154 - struct symsearch arr[] = {
60155 + struct symsearch modarr[] = {
60156 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
60157 NOT_GPL_ONLY, false },
60158 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
60159 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
60160 #endif
60161 };
60162
60163 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
60164 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
60165 return true;
60166 }
60167 return false;
60168 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
60169 static int percpu_modalloc(struct module *mod,
60170 unsigned long size, unsigned long align)
60171 {
60172 - if (align > PAGE_SIZE) {
60173 + if (align-1 >= PAGE_SIZE) {
60174 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
60175 mod->name, align, PAGE_SIZE);
60176 align = PAGE_SIZE;
60177 @@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
60178 */
60179 #ifdef CONFIG_SYSFS
60180
60181 -#ifdef CONFIG_KALLSYMS
60182 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60183 static inline bool sect_empty(const Elf_Shdr *sect)
60184 {
60185 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
60186 @@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
60187
60188 static void unset_module_core_ro_nx(struct module *mod)
60189 {
60190 - set_page_attributes(mod->module_core + mod->core_text_size,
60191 - mod->module_core + mod->core_size,
60192 + set_page_attributes(mod->module_core_rw,
60193 + mod->module_core_rw + mod->core_size_rw,
60194 set_memory_x);
60195 - set_page_attributes(mod->module_core,
60196 - mod->module_core + mod->core_ro_size,
60197 + set_page_attributes(mod->module_core_rx,
60198 + mod->module_core_rx + mod->core_size_rx,
60199 set_memory_rw);
60200 }
60201
60202 static void unset_module_init_ro_nx(struct module *mod)
60203 {
60204 - set_page_attributes(mod->module_init + mod->init_text_size,
60205 - mod->module_init + mod->init_size,
60206 + set_page_attributes(mod->module_init_rw,
60207 + mod->module_init_rw + mod->init_size_rw,
60208 set_memory_x);
60209 - set_page_attributes(mod->module_init,
60210 - mod->module_init + mod->init_ro_size,
60211 + set_page_attributes(mod->module_init_rx,
60212 + mod->module_init_rx + mod->init_size_rx,
60213 set_memory_rw);
60214 }
60215
60216 @@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
60217
60218 mutex_lock(&module_mutex);
60219 list_for_each_entry_rcu(mod, &modules, list) {
60220 - if ((mod->module_core) && (mod->core_text_size)) {
60221 - set_page_attributes(mod->module_core,
60222 - mod->module_core + mod->core_text_size,
60223 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
60224 + set_page_attributes(mod->module_core_rx,
60225 + mod->module_core_rx + mod->core_size_rx,
60226 set_memory_rw);
60227 }
60228 - if ((mod->module_init) && (mod->init_text_size)) {
60229 - set_page_attributes(mod->module_init,
60230 - mod->module_init + mod->init_text_size,
60231 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
60232 + set_page_attributes(mod->module_init_rx,
60233 + mod->module_init_rx + mod->init_size_rx,
60234 set_memory_rw);
60235 }
60236 }
60237 @@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
60238
60239 mutex_lock(&module_mutex);
60240 list_for_each_entry_rcu(mod, &modules, list) {
60241 - if ((mod->module_core) && (mod->core_text_size)) {
60242 - set_page_attributes(mod->module_core,
60243 - mod->module_core + mod->core_text_size,
60244 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
60245 + set_page_attributes(mod->module_core_rx,
60246 + mod->module_core_rx + mod->core_size_rx,
60247 set_memory_ro);
60248 }
60249 - if ((mod->module_init) && (mod->init_text_size)) {
60250 - set_page_attributes(mod->module_init,
60251 - mod->module_init + mod->init_text_size,
60252 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
60253 + set_page_attributes(mod->module_init_rx,
60254 + mod->module_init_rx + mod->init_size_rx,
60255 set_memory_ro);
60256 }
60257 }
60258 @@ -1722,16 +1724,19 @@ static void free_module(struct module *m
60259
60260 /* This may be NULL, but that's OK */
60261 unset_module_init_ro_nx(mod);
60262 - module_free(mod, mod->module_init);
60263 + module_free(mod, mod->module_init_rw);
60264 + module_free_exec(mod, mod->module_init_rx);
60265 kfree(mod->args);
60266 percpu_modfree(mod);
60267
60268 /* Free lock-classes: */
60269 - lockdep_free_key_range(mod->module_core, mod->core_size);
60270 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
60271 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
60272
60273 /* Finally, free the core (containing the module structure) */
60274 unset_module_core_ro_nx(mod);
60275 - module_free(mod, mod->module_core);
60276 + module_free_exec(mod, mod->module_core_rx);
60277 + module_free(mod, mod->module_core_rw);
60278
60279 #ifdef CONFIG_MPU
60280 update_protections(current->mm);
60281 @@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
60282 unsigned int i;
60283 int ret = 0;
60284 const struct kernel_symbol *ksym;
60285 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60286 + int is_fs_load = 0;
60287 + int register_filesystem_found = 0;
60288 + char *p;
60289 +
60290 + p = strstr(mod->args, "grsec_modharden_fs");
60291 + if (p) {
60292 + char *endptr = p + strlen("grsec_modharden_fs");
60293 + /* copy \0 as well */
60294 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
60295 + is_fs_load = 1;
60296 + }
60297 +#endif
60298
60299 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
60300 const char *name = info->strtab + sym[i].st_name;
60301
60302 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60303 + /* it's a real shame this will never get ripped and copied
60304 + upstream! ;(
60305 + */
60306 + if (is_fs_load && !strcmp(name, "register_filesystem"))
60307 + register_filesystem_found = 1;
60308 +#endif
60309 +
60310 switch (sym[i].st_shndx) {
60311 case SHN_COMMON:
60312 /* We compiled with -fno-common. These are not
60313 @@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
60314 ksym = resolve_symbol_wait(mod, info, name);
60315 /* Ok if resolved. */
60316 if (ksym && !IS_ERR(ksym)) {
60317 + pax_open_kernel();
60318 sym[i].st_value = ksym->value;
60319 + pax_close_kernel();
60320 break;
60321 }
60322
60323 @@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
60324 secbase = (unsigned long)mod_percpu(mod);
60325 else
60326 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
60327 + pax_open_kernel();
60328 sym[i].st_value += secbase;
60329 + pax_close_kernel();
60330 break;
60331 }
60332 }
60333
60334 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60335 + if (is_fs_load && !register_filesystem_found) {
60336 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
60337 + ret = -EPERM;
60338 + }
60339 +#endif
60340 +
60341 return ret;
60342 }
60343
60344 @@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
60345 || s->sh_entsize != ~0UL
60346 || strstarts(sname, ".init"))
60347 continue;
60348 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
60349 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60350 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
60351 + else
60352 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
60353 DEBUGP("\t%s\n", name);
60354 }
60355 - switch (m) {
60356 - case 0: /* executable */
60357 - mod->core_size = debug_align(mod->core_size);
60358 - mod->core_text_size = mod->core_size;
60359 - break;
60360 - case 1: /* RO: text and ro-data */
60361 - mod->core_size = debug_align(mod->core_size);
60362 - mod->core_ro_size = mod->core_size;
60363 - break;
60364 - case 3: /* whole core */
60365 - mod->core_size = debug_align(mod->core_size);
60366 - break;
60367 - }
60368 }
60369
60370 DEBUGP("Init section allocation order:\n");
60371 @@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
60372 || s->sh_entsize != ~0UL
60373 || !strstarts(sname, ".init"))
60374 continue;
60375 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
60376 - | INIT_OFFSET_MASK);
60377 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60378 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
60379 + else
60380 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
60381 + s->sh_entsize |= INIT_OFFSET_MASK;
60382 DEBUGP("\t%s\n", sname);
60383 }
60384 - switch (m) {
60385 - case 0: /* executable */
60386 - mod->init_size = debug_align(mod->init_size);
60387 - mod->init_text_size = mod->init_size;
60388 - break;
60389 - case 1: /* RO: text and ro-data */
60390 - mod->init_size = debug_align(mod->init_size);
60391 - mod->init_ro_size = mod->init_size;
60392 - break;
60393 - case 3: /* whole init */
60394 - mod->init_size = debug_align(mod->init_size);
60395 - break;
60396 - }
60397 }
60398 }
60399
60400 @@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
60401
60402 /* Put symbol section at end of init part of module. */
60403 symsect->sh_flags |= SHF_ALLOC;
60404 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
60405 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
60406 info->index.sym) | INIT_OFFSET_MASK;
60407 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
60408
60409 @@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
60410 }
60411
60412 /* Append room for core symbols at end of core part. */
60413 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
60414 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
60415 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
60416 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
60417
60418 /* Put string table section at end of init part of module. */
60419 strsect->sh_flags |= SHF_ALLOC;
60420 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
60421 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
60422 info->index.str) | INIT_OFFSET_MASK;
60423 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
60424
60425 /* Append room for core symbols' strings at end of core part. */
60426 - info->stroffs = mod->core_size;
60427 + info->stroffs = mod->core_size_rx;
60428 __set_bit(0, info->strmap);
60429 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
60430 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
60431 }
60432
60433 static void add_kallsyms(struct module *mod, const struct load_info *info)
60434 @@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
60435 /* Make sure we get permanent strtab: don't use info->strtab. */
60436 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
60437
60438 + pax_open_kernel();
60439 +
60440 /* Set types up while we still have access to sections. */
60441 for (i = 0; i < mod->num_symtab; i++)
60442 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
60443
60444 - mod->core_symtab = dst = mod->module_core + info->symoffs;
60445 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
60446 src = mod->symtab;
60447 *dst = *src;
60448 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
60449 @@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
60450 }
60451 mod->core_num_syms = ndst;
60452
60453 - mod->core_strtab = s = mod->module_core + info->stroffs;
60454 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
60455 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
60456 if (test_bit(i, info->strmap))
60457 *++s = mod->strtab[i];
60458 +
60459 + pax_close_kernel();
60460 }
60461 #else
60462 static inline void layout_symtab(struct module *mod, struct load_info *info)
60463 @@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
60464 ddebug_remove_module(debug->modname);
60465 }
60466
60467 -static void *module_alloc_update_bounds(unsigned long size)
60468 +static void *module_alloc_update_bounds_rw(unsigned long size)
60469 {
60470 void *ret = module_alloc(size);
60471
60472 if (ret) {
60473 mutex_lock(&module_mutex);
60474 /* Update module bounds. */
60475 - if ((unsigned long)ret < module_addr_min)
60476 - module_addr_min = (unsigned long)ret;
60477 - if ((unsigned long)ret + size > module_addr_max)
60478 - module_addr_max = (unsigned long)ret + size;
60479 + if ((unsigned long)ret < module_addr_min_rw)
60480 + module_addr_min_rw = (unsigned long)ret;
60481 + if ((unsigned long)ret + size > module_addr_max_rw)
60482 + module_addr_max_rw = (unsigned long)ret + size;
60483 + mutex_unlock(&module_mutex);
60484 + }
60485 + return ret;
60486 +}
60487 +
60488 +static void *module_alloc_update_bounds_rx(unsigned long size)
60489 +{
60490 + void *ret = module_alloc_exec(size);
60491 +
60492 + if (ret) {
60493 + mutex_lock(&module_mutex);
60494 + /* Update module bounds. */
60495 + if ((unsigned long)ret < module_addr_min_rx)
60496 + module_addr_min_rx = (unsigned long)ret;
60497 + if ((unsigned long)ret + size > module_addr_max_rx)
60498 + module_addr_max_rx = (unsigned long)ret + size;
60499 mutex_unlock(&module_mutex);
60500 }
60501 return ret;
60502 @@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
60503 void *ptr;
60504
60505 /* Do the allocs. */
60506 - ptr = module_alloc_update_bounds(mod->core_size);
60507 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60508 /*
60509 * The pointer to this block is stored in the module structure
60510 * which is inside the block. Just mark it as not being a
60511 @@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
60512 if (!ptr)
60513 return -ENOMEM;
60514
60515 - memset(ptr, 0, mod->core_size);
60516 - mod->module_core = ptr;
60517 + memset(ptr, 0, mod->core_size_rw);
60518 + mod->module_core_rw = ptr;
60519
60520 - ptr = module_alloc_update_bounds(mod->init_size);
60521 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60522 /*
60523 * The pointer to this block is stored in the module structure
60524 * which is inside the block. This block doesn't need to be
60525 * scanned as it contains data and code that will be freed
60526 * after the module is initialized.
60527 */
60528 - kmemleak_ignore(ptr);
60529 - if (!ptr && mod->init_size) {
60530 - module_free(mod, mod->module_core);
60531 + kmemleak_not_leak(ptr);
60532 + if (!ptr && mod->init_size_rw) {
60533 + module_free(mod, mod->module_core_rw);
60534 return -ENOMEM;
60535 }
60536 - memset(ptr, 0, mod->init_size);
60537 - mod->module_init = ptr;
60538 + memset(ptr, 0, mod->init_size_rw);
60539 + mod->module_init_rw = ptr;
60540 +
60541 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
60542 + kmemleak_not_leak(ptr);
60543 + if (!ptr) {
60544 + module_free(mod, mod->module_init_rw);
60545 + module_free(mod, mod->module_core_rw);
60546 + return -ENOMEM;
60547 + }
60548 +
60549 + pax_open_kernel();
60550 + memset(ptr, 0, mod->core_size_rx);
60551 + pax_close_kernel();
60552 + mod->module_core_rx = ptr;
60553 +
60554 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60555 + kmemleak_not_leak(ptr);
60556 + if (!ptr && mod->init_size_rx) {
60557 + module_free_exec(mod, mod->module_core_rx);
60558 + module_free(mod, mod->module_init_rw);
60559 + module_free(mod, mod->module_core_rw);
60560 + return -ENOMEM;
60561 + }
60562 +
60563 + pax_open_kernel();
60564 + memset(ptr, 0, mod->init_size_rx);
60565 + pax_close_kernel();
60566 + mod->module_init_rx = ptr;
60567
60568 /* Transfer each section which specifies SHF_ALLOC */
60569 DEBUGP("final section addresses:\n");
60570 @@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
60571 if (!(shdr->sh_flags & SHF_ALLOC))
60572 continue;
60573
60574 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
60575 - dest = mod->module_init
60576 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
60577 - else
60578 - dest = mod->module_core + shdr->sh_entsize;
60579 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
60580 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
60581 + dest = mod->module_init_rw
60582 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
60583 + else
60584 + dest = mod->module_init_rx
60585 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
60586 + } else {
60587 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
60588 + dest = mod->module_core_rw + shdr->sh_entsize;
60589 + else
60590 + dest = mod->module_core_rx + shdr->sh_entsize;
60591 + }
60592 +
60593 + if (shdr->sh_type != SHT_NOBITS) {
60594 +
60595 +#ifdef CONFIG_PAX_KERNEXEC
60596 +#ifdef CONFIG_X86_64
60597 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
60598 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
60599 +#endif
60600 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
60601 + pax_open_kernel();
60602 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
60603 + pax_close_kernel();
60604 + } else
60605 +#endif
60606
60607 - if (shdr->sh_type != SHT_NOBITS)
60608 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
60609 + }
60610 /* Update sh_addr to point to copy in image. */
60611 - shdr->sh_addr = (unsigned long)dest;
60612 +
60613 +#ifdef CONFIG_PAX_KERNEXEC
60614 + if (shdr->sh_flags & SHF_EXECINSTR)
60615 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
60616 + else
60617 +#endif
60618 +
60619 + shdr->sh_addr = (unsigned long)dest;
60620 DEBUGP("\t0x%lx %s\n",
60621 shdr->sh_addr, info->secstrings + shdr->sh_name);
60622 }
60623 @@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
60624 * Do it before processing of module parameters, so the module
60625 * can provide parameter accessor functions of its own.
60626 */
60627 - if (mod->module_init)
60628 - flush_icache_range((unsigned long)mod->module_init,
60629 - (unsigned long)mod->module_init
60630 - + mod->init_size);
60631 - flush_icache_range((unsigned long)mod->module_core,
60632 - (unsigned long)mod->module_core + mod->core_size);
60633 + if (mod->module_init_rx)
60634 + flush_icache_range((unsigned long)mod->module_init_rx,
60635 + (unsigned long)mod->module_init_rx
60636 + + mod->init_size_rx);
60637 + flush_icache_range((unsigned long)mod->module_core_rx,
60638 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
60639
60640 set_fs(old_fs);
60641 }
60642 @@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
60643 {
60644 kfree(info->strmap);
60645 percpu_modfree(mod);
60646 - module_free(mod, mod->module_init);
60647 - module_free(mod, mod->module_core);
60648 + module_free_exec(mod, mod->module_init_rx);
60649 + module_free_exec(mod, mod->module_core_rx);
60650 + module_free(mod, mod->module_init_rw);
60651 + module_free(mod, mod->module_core_rw);
60652 }
60653
60654 static int post_relocation(struct module *mod, const struct load_info *info)
60655 @@ -2770,9 +2865,38 @@ static struct module *load_module(void _
60656 if (err)
60657 goto free_unload;
60658
60659 + /* Now copy in args */
60660 + mod->args = strndup_user(uargs, ~0UL >> 1);
60661 + if (IS_ERR(mod->args)) {
60662 + err = PTR_ERR(mod->args);
60663 + goto free_unload;
60664 + }
60665 +
60666 /* Set up MODINFO_ATTR fields */
60667 setup_modinfo(mod, &info);
60668
60669 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60670 + {
60671 + char *p, *p2;
60672 +
60673 + if (strstr(mod->args, "grsec_modharden_netdev")) {
60674 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
60675 + err = -EPERM;
60676 + goto free_modinfo;
60677 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
60678 + p += strlen("grsec_modharden_normal");
60679 + p2 = strstr(p, "_");
60680 + if (p2) {
60681 + *p2 = '\0';
60682 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
60683 + *p2 = '_';
60684 + }
60685 + err = -EPERM;
60686 + goto free_modinfo;
60687 + }
60688 + }
60689 +#endif
60690 +
60691 /* Fix up syms, so that st_value is a pointer to location. */
60692 err = simplify_symbols(mod, &info);
60693 if (err < 0)
60694 @@ -2788,13 +2912,6 @@ static struct module *load_module(void _
60695
60696 flush_module_icache(mod);
60697
60698 - /* Now copy in args */
60699 - mod->args = strndup_user(uargs, ~0UL >> 1);
60700 - if (IS_ERR(mod->args)) {
60701 - err = PTR_ERR(mod->args);
60702 - goto free_arch_cleanup;
60703 - }
60704 -
60705 /* Mark state as coming so strong_try_module_get() ignores us. */
60706 mod->state = MODULE_STATE_COMING;
60707
60708 @@ -2854,11 +2971,10 @@ static struct module *load_module(void _
60709 unlock:
60710 mutex_unlock(&module_mutex);
60711 synchronize_sched();
60712 - kfree(mod->args);
60713 - free_arch_cleanup:
60714 module_arch_cleanup(mod);
60715 free_modinfo:
60716 free_modinfo(mod);
60717 + kfree(mod->args);
60718 free_unload:
60719 module_unload_free(mod);
60720 free_module:
60721 @@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
60722 MODULE_STATE_COMING, mod);
60723
60724 /* Set RO and NX regions for core */
60725 - set_section_ro_nx(mod->module_core,
60726 - mod->core_text_size,
60727 - mod->core_ro_size,
60728 - mod->core_size);
60729 + set_section_ro_nx(mod->module_core_rx,
60730 + mod->core_size_rx,
60731 + mod->core_size_rx,
60732 + mod->core_size_rx);
60733
60734 /* Set RO and NX regions for init */
60735 - set_section_ro_nx(mod->module_init,
60736 - mod->init_text_size,
60737 - mod->init_ro_size,
60738 - mod->init_size);
60739 + set_section_ro_nx(mod->module_init_rx,
60740 + mod->init_size_rx,
60741 + mod->init_size_rx,
60742 + mod->init_size_rx);
60743
60744 do_mod_ctors(mod);
60745 /* Start the module */
60746 @@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
60747 mod->strtab = mod->core_strtab;
60748 #endif
60749 unset_module_init_ro_nx(mod);
60750 - module_free(mod, mod->module_init);
60751 - mod->module_init = NULL;
60752 - mod->init_size = 0;
60753 - mod->init_ro_size = 0;
60754 - mod->init_text_size = 0;
60755 + module_free(mod, mod->module_init_rw);
60756 + module_free_exec(mod, mod->module_init_rx);
60757 + mod->module_init_rw = NULL;
60758 + mod->module_init_rx = NULL;
60759 + mod->init_size_rw = 0;
60760 + mod->init_size_rx = 0;
60761 mutex_unlock(&module_mutex);
60762
60763 return 0;
60764 @@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
60765 unsigned long nextval;
60766
60767 /* At worse, next value is at end of module */
60768 - if (within_module_init(addr, mod))
60769 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
60770 + if (within_module_init_rx(addr, mod))
60771 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
60772 + else if (within_module_init_rw(addr, mod))
60773 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
60774 + else if (within_module_core_rx(addr, mod))
60775 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
60776 + else if (within_module_core_rw(addr, mod))
60777 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
60778 else
60779 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
60780 + return NULL;
60781
60782 /* Scan for closest preceding symbol, and next symbol. (ELF
60783 starts real symbols at 1). */
60784 @@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
60785 char buf[8];
60786
60787 seq_printf(m, "%s %u",
60788 - mod->name, mod->init_size + mod->core_size);
60789 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
60790 print_unload_info(m, mod);
60791
60792 /* Informative for users. */
60793 @@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
60794 mod->state == MODULE_STATE_COMING ? "Loading":
60795 "Live");
60796 /* Used by oprofile and other similar tools. */
60797 - seq_printf(m, " 0x%pK", mod->module_core);
60798 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
60799
60800 /* Taints info */
60801 if (mod->taints)
60802 @@ -3283,7 +3406,17 @@ static const struct file_operations proc
60803
60804 static int __init proc_modules_init(void)
60805 {
60806 +#ifndef CONFIG_GRKERNSEC_HIDESYM
60807 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60808 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60809 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60810 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
60811 +#else
60812 proc_create("modules", 0, NULL, &proc_modules_operations);
60813 +#endif
60814 +#else
60815 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60816 +#endif
60817 return 0;
60818 }
60819 module_init(proc_modules_init);
60820 @@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
60821 {
60822 struct module *mod;
60823
60824 - if (addr < module_addr_min || addr > module_addr_max)
60825 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
60826 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
60827 return NULL;
60828
60829 list_for_each_entry_rcu(mod, &modules, list)
60830 - if (within_module_core(addr, mod)
60831 - || within_module_init(addr, mod))
60832 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
60833 return mod;
60834 return NULL;
60835 }
60836 @@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
60837 */
60838 struct module *__module_text_address(unsigned long addr)
60839 {
60840 - struct module *mod = __module_address(addr);
60841 + struct module *mod;
60842 +
60843 +#ifdef CONFIG_X86_32
60844 + addr = ktla_ktva(addr);
60845 +#endif
60846 +
60847 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
60848 + return NULL;
60849 +
60850 + mod = __module_address(addr);
60851 +
60852 if (mod) {
60853 /* Make sure it's within the text section. */
60854 - if (!within(addr, mod->module_init, mod->init_text_size)
60855 - && !within(addr, mod->module_core, mod->core_text_size))
60856 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
60857 mod = NULL;
60858 }
60859 return mod;
60860 diff -urNp linux-3.0.4/kernel/mutex.c linux-3.0.4/kernel/mutex.c
60861 --- linux-3.0.4/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
60862 +++ linux-3.0.4/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
60863 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
60864 spin_lock_mutex(&lock->wait_lock, flags);
60865
60866 debug_mutex_lock_common(lock, &waiter);
60867 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
60868 + debug_mutex_add_waiter(lock, &waiter, task);
60869
60870 /* add waiting tasks to the end of the waitqueue (FIFO): */
60871 list_add_tail(&waiter.list, &lock->wait_list);
60872 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
60873 * TASK_UNINTERRUPTIBLE case.)
60874 */
60875 if (unlikely(signal_pending_state(state, task))) {
60876 - mutex_remove_waiter(lock, &waiter,
60877 - task_thread_info(task));
60878 + mutex_remove_waiter(lock, &waiter, task);
60879 mutex_release(&lock->dep_map, 1, ip);
60880 spin_unlock_mutex(&lock->wait_lock, flags);
60881
60882 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
60883 done:
60884 lock_acquired(&lock->dep_map, ip);
60885 /* got the lock - rejoice! */
60886 - mutex_remove_waiter(lock, &waiter, current_thread_info());
60887 + mutex_remove_waiter(lock, &waiter, task);
60888 mutex_set_owner(lock);
60889
60890 /* set it to 0 if there are no waiters left: */
60891 diff -urNp linux-3.0.4/kernel/mutex-debug.c linux-3.0.4/kernel/mutex-debug.c
60892 --- linux-3.0.4/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
60893 +++ linux-3.0.4/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
60894 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
60895 }
60896
60897 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60898 - struct thread_info *ti)
60899 + struct task_struct *task)
60900 {
60901 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
60902
60903 /* Mark the current thread as blocked on the lock: */
60904 - ti->task->blocked_on = waiter;
60905 + task->blocked_on = waiter;
60906 }
60907
60908 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60909 - struct thread_info *ti)
60910 + struct task_struct *task)
60911 {
60912 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
60913 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
60914 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
60915 - ti->task->blocked_on = NULL;
60916 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
60917 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
60918 + task->blocked_on = NULL;
60919
60920 list_del_init(&waiter->list);
60921 waiter->task = NULL;
60922 diff -urNp linux-3.0.4/kernel/mutex-debug.h linux-3.0.4/kernel/mutex-debug.h
60923 --- linux-3.0.4/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
60924 +++ linux-3.0.4/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
60925 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
60926 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
60927 extern void debug_mutex_add_waiter(struct mutex *lock,
60928 struct mutex_waiter *waiter,
60929 - struct thread_info *ti);
60930 + struct task_struct *task);
60931 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60932 - struct thread_info *ti);
60933 + struct task_struct *task);
60934 extern void debug_mutex_unlock(struct mutex *lock);
60935 extern void debug_mutex_init(struct mutex *lock, const char *name,
60936 struct lock_class_key *key);
60937 diff -urNp linux-3.0.4/kernel/padata.c linux-3.0.4/kernel/padata.c
60938 --- linux-3.0.4/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
60939 +++ linux-3.0.4/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
60940 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
60941 padata->pd = pd;
60942 padata->cb_cpu = cb_cpu;
60943
60944 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
60945 - atomic_set(&pd->seq_nr, -1);
60946 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
60947 + atomic_set_unchecked(&pd->seq_nr, -1);
60948
60949 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
60950 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
60951
60952 target_cpu = padata_cpu_hash(padata);
60953 queue = per_cpu_ptr(pd->pqueue, target_cpu);
60954 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
60955 padata_init_pqueues(pd);
60956 padata_init_squeues(pd);
60957 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
60958 - atomic_set(&pd->seq_nr, -1);
60959 + atomic_set_unchecked(&pd->seq_nr, -1);
60960 atomic_set(&pd->reorder_objects, 0);
60961 atomic_set(&pd->refcnt, 0);
60962 pd->pinst = pinst;
60963 diff -urNp linux-3.0.4/kernel/panic.c linux-3.0.4/kernel/panic.c
60964 --- linux-3.0.4/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
60965 +++ linux-3.0.4/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
60966 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
60967 const char *board;
60968
60969 printk(KERN_WARNING "------------[ cut here ]------------\n");
60970 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
60971 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
60972 board = dmi_get_system_info(DMI_PRODUCT_NAME);
60973 if (board)
60974 printk(KERN_WARNING "Hardware name: %s\n", board);
60975 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
60976 */
60977 void __stack_chk_fail(void)
60978 {
60979 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
60980 + dump_stack();
60981 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
60982 __builtin_return_address(0));
60983 }
60984 EXPORT_SYMBOL(__stack_chk_fail);
60985 diff -urNp linux-3.0.4/kernel/pid.c linux-3.0.4/kernel/pid.c
60986 --- linux-3.0.4/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
60987 +++ linux-3.0.4/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
60988 @@ -33,6 +33,7 @@
60989 #include <linux/rculist.h>
60990 #include <linux/bootmem.h>
60991 #include <linux/hash.h>
60992 +#include <linux/security.h>
60993 #include <linux/pid_namespace.h>
60994 #include <linux/init_task.h>
60995 #include <linux/syscalls.h>
60996 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
60997
60998 int pid_max = PID_MAX_DEFAULT;
60999
61000 -#define RESERVED_PIDS 300
61001 +#define RESERVED_PIDS 500
61002
61003 int pid_max_min = RESERVED_PIDS + 1;
61004 int pid_max_max = PID_MAX_LIMIT;
61005 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
61006 */
61007 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
61008 {
61009 + struct task_struct *task;
61010 +
61011 rcu_lockdep_assert(rcu_read_lock_held());
61012 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61013 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61014 +
61015 + if (gr_pid_is_chrooted(task))
61016 + return NULL;
61017 +
61018 + return task;
61019 }
61020
61021 struct task_struct *find_task_by_vpid(pid_t vnr)
61022 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
61023 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
61024 }
61025
61026 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
61027 +{
61028 + rcu_lockdep_assert(rcu_read_lock_held());
61029 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
61030 +}
61031 +
61032 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
61033 {
61034 struct pid *pid;
61035 diff -urNp linux-3.0.4/kernel/posix-cpu-timers.c linux-3.0.4/kernel/posix-cpu-timers.c
61036 --- linux-3.0.4/kernel/posix-cpu-timers.c 2011-07-21 22:17:23.000000000 -0400
61037 +++ linux-3.0.4/kernel/posix-cpu-timers.c 2011-08-23 21:48:14.000000000 -0400
61038 @@ -6,6 +6,7 @@
61039 #include <linux/posix-timers.h>
61040 #include <linux/errno.h>
61041 #include <linux/math64.h>
61042 +#include <linux/security.h>
61043 #include <asm/uaccess.h>
61044 #include <linux/kernel_stat.h>
61045 #include <trace/events/timer.h>
61046 @@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
61047
61048 static __init int init_posix_cpu_timers(void)
61049 {
61050 - struct k_clock process = {
61051 + static struct k_clock process = {
61052 .clock_getres = process_cpu_clock_getres,
61053 .clock_get = process_cpu_clock_get,
61054 .timer_create = process_cpu_timer_create,
61055 .nsleep = process_cpu_nsleep,
61056 .nsleep_restart = process_cpu_nsleep_restart,
61057 };
61058 - struct k_clock thread = {
61059 + static struct k_clock thread = {
61060 .clock_getres = thread_cpu_clock_getres,
61061 .clock_get = thread_cpu_clock_get,
61062 .timer_create = thread_cpu_timer_create,
61063 diff -urNp linux-3.0.4/kernel/posix-timers.c linux-3.0.4/kernel/posix-timers.c
61064 --- linux-3.0.4/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
61065 +++ linux-3.0.4/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
61066 @@ -43,6 +43,7 @@
61067 #include <linux/idr.h>
61068 #include <linux/posix-clock.h>
61069 #include <linux/posix-timers.h>
61070 +#include <linux/grsecurity.h>
61071 #include <linux/syscalls.h>
61072 #include <linux/wait.h>
61073 #include <linux/workqueue.h>
61074 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
61075 * which we beg off on and pass to do_sys_settimeofday().
61076 */
61077
61078 -static struct k_clock posix_clocks[MAX_CLOCKS];
61079 +static struct k_clock *posix_clocks[MAX_CLOCKS];
61080
61081 /*
61082 * These ones are defined below.
61083 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
61084 */
61085 static __init int init_posix_timers(void)
61086 {
61087 - struct k_clock clock_realtime = {
61088 + static struct k_clock clock_realtime = {
61089 .clock_getres = hrtimer_get_res,
61090 .clock_get = posix_clock_realtime_get,
61091 .clock_set = posix_clock_realtime_set,
61092 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
61093 .timer_get = common_timer_get,
61094 .timer_del = common_timer_del,
61095 };
61096 - struct k_clock clock_monotonic = {
61097 + static struct k_clock clock_monotonic = {
61098 .clock_getres = hrtimer_get_res,
61099 .clock_get = posix_ktime_get_ts,
61100 .nsleep = common_nsleep,
61101 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
61102 .timer_get = common_timer_get,
61103 .timer_del = common_timer_del,
61104 };
61105 - struct k_clock clock_monotonic_raw = {
61106 + static struct k_clock clock_monotonic_raw = {
61107 .clock_getres = hrtimer_get_res,
61108 .clock_get = posix_get_monotonic_raw,
61109 };
61110 - struct k_clock clock_realtime_coarse = {
61111 + static struct k_clock clock_realtime_coarse = {
61112 .clock_getres = posix_get_coarse_res,
61113 .clock_get = posix_get_realtime_coarse,
61114 };
61115 - struct k_clock clock_monotonic_coarse = {
61116 + static struct k_clock clock_monotonic_coarse = {
61117 .clock_getres = posix_get_coarse_res,
61118 .clock_get = posix_get_monotonic_coarse,
61119 };
61120 - struct k_clock clock_boottime = {
61121 + static struct k_clock clock_boottime = {
61122 .clock_getres = hrtimer_get_res,
61123 .clock_get = posix_get_boottime,
61124 .nsleep = common_nsleep,
61125 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
61126 .timer_del = common_timer_del,
61127 };
61128
61129 + pax_track_stack();
61130 +
61131 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
61132 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
61133 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
61134 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
61135 return;
61136 }
61137
61138 - posix_clocks[clock_id] = *new_clock;
61139 + posix_clocks[clock_id] = new_clock;
61140 }
61141 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
61142
61143 @@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
61144 return (id & CLOCKFD_MASK) == CLOCKFD ?
61145 &clock_posix_dynamic : &clock_posix_cpu;
61146
61147 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
61148 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
61149 return NULL;
61150 - return &posix_clocks[id];
61151 + return posix_clocks[id];
61152 }
61153
61154 static int common_timer_create(struct k_itimer *new_timer)
61155 @@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
61156 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
61157 return -EFAULT;
61158
61159 + /* only the CLOCK_REALTIME clock can be set, all other clocks
61160 + have their clock_set fptr set to a nosettime dummy function
61161 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
61162 + call common_clock_set, which calls do_sys_settimeofday, which
61163 + we hook
61164 + */
61165 +
61166 return kc->clock_set(which_clock, &new_tp);
61167 }
61168
61169 diff -urNp linux-3.0.4/kernel/power/poweroff.c linux-3.0.4/kernel/power/poweroff.c
61170 --- linux-3.0.4/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
61171 +++ linux-3.0.4/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
61172 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
61173 .enable_mask = SYSRQ_ENABLE_BOOT,
61174 };
61175
61176 -static int pm_sysrq_init(void)
61177 +static int __init pm_sysrq_init(void)
61178 {
61179 register_sysrq_key('o', &sysrq_poweroff_op);
61180 return 0;
61181 diff -urNp linux-3.0.4/kernel/power/process.c linux-3.0.4/kernel/power/process.c
61182 --- linux-3.0.4/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
61183 +++ linux-3.0.4/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
61184 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
61185 u64 elapsed_csecs64;
61186 unsigned int elapsed_csecs;
61187 bool wakeup = false;
61188 + bool timedout = false;
61189
61190 do_gettimeofday(&start);
61191
61192 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
61193
61194 while (true) {
61195 todo = 0;
61196 + if (time_after(jiffies, end_time))
61197 + timedout = true;
61198 read_lock(&tasklist_lock);
61199 do_each_thread(g, p) {
61200 if (frozen(p) || !freezable(p))
61201 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
61202 * try_to_stop() after schedule() in ptrace/signal
61203 * stop sees TIF_FREEZE.
61204 */
61205 - if (!task_is_stopped_or_traced(p) &&
61206 - !freezer_should_skip(p))
61207 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
61208 todo++;
61209 + if (timedout) {
61210 + printk(KERN_ERR "Task refusing to freeze:\n");
61211 + sched_show_task(p);
61212 + }
61213 + }
61214 } while_each_thread(g, p);
61215 read_unlock(&tasklist_lock);
61216
61217 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
61218 todo += wq_busy;
61219 }
61220
61221 - if (!todo || time_after(jiffies, end_time))
61222 + if (!todo || timedout)
61223 break;
61224
61225 if (pm_wakeup_pending()) {
61226 diff -urNp linux-3.0.4/kernel/printk.c linux-3.0.4/kernel/printk.c
61227 --- linux-3.0.4/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
61228 +++ linux-3.0.4/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
61229 @@ -313,12 +313,17 @@ static int check_syslog_permissions(int
61230 if (from_file && type != SYSLOG_ACTION_OPEN)
61231 return 0;
61232
61233 +#ifdef CONFIG_GRKERNSEC_DMESG
61234 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
61235 + return -EPERM;
61236 +#endif
61237 +
61238 if (syslog_action_restricted(type)) {
61239 if (capable(CAP_SYSLOG))
61240 return 0;
61241 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
61242 if (capable(CAP_SYS_ADMIN)) {
61243 - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
61244 + printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
61245 "but no CAP_SYSLOG (deprecated).\n");
61246 return 0;
61247 }
61248 diff -urNp linux-3.0.4/kernel/profile.c linux-3.0.4/kernel/profile.c
61249 --- linux-3.0.4/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
61250 +++ linux-3.0.4/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
61251 @@ -39,7 +39,7 @@ struct profile_hit {
61252 /* Oprofile timer tick hook */
61253 static int (*timer_hook)(struct pt_regs *) __read_mostly;
61254
61255 -static atomic_t *prof_buffer;
61256 +static atomic_unchecked_t *prof_buffer;
61257 static unsigned long prof_len, prof_shift;
61258
61259 int prof_on __read_mostly;
61260 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
61261 hits[i].pc = 0;
61262 continue;
61263 }
61264 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61265 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61266 hits[i].hits = hits[i].pc = 0;
61267 }
61268 }
61269 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
61270 * Add the current hit(s) and flush the write-queue out
61271 * to the global buffer:
61272 */
61273 - atomic_add(nr_hits, &prof_buffer[pc]);
61274 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61275 for (i = 0; i < NR_PROFILE_HIT; ++i) {
61276 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61277 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61278 hits[i].pc = hits[i].hits = 0;
61279 }
61280 out:
61281 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
61282 {
61283 unsigned long pc;
61284 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61285 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61286 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61287 }
61288 #endif /* !CONFIG_SMP */
61289
61290 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61291 return -EFAULT;
61292 buf++; p++; count--; read++;
61293 }
61294 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61295 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61296 if (copy_to_user(buf, (void *)pnt, count))
61297 return -EFAULT;
61298 read += count;
61299 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61300 }
61301 #endif
61302 profile_discard_flip_buffers();
61303 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61304 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61305 return count;
61306 }
61307
61308 diff -urNp linux-3.0.4/kernel/ptrace.c linux-3.0.4/kernel/ptrace.c
61309 --- linux-3.0.4/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
61310 +++ linux-3.0.4/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
61311 @@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
61312 return ret;
61313 }
61314
61315 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61316 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61317 + unsigned int log)
61318 {
61319 const struct cred *cred = current_cred(), *tcred;
61320
61321 @@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
61322 cred->gid == tcred->sgid &&
61323 cred->gid == tcred->gid))
61324 goto ok;
61325 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
61326 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
61327 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
61328 goto ok;
61329 rcu_read_unlock();
61330 return -EPERM;
61331 @@ -167,7 +169,9 @@ ok:
61332 smp_rmb();
61333 if (task->mm)
61334 dumpable = get_dumpable(task->mm);
61335 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
61336 + if (!dumpable &&
61337 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
61338 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
61339 return -EPERM;
61340
61341 return security_ptrace_access_check(task, mode);
61342 @@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
61343 {
61344 int err;
61345 task_lock(task);
61346 - err = __ptrace_may_access(task, mode);
61347 + err = __ptrace_may_access(task, mode, 0);
61348 + task_unlock(task);
61349 + return !err;
61350 +}
61351 +
61352 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61353 +{
61354 + int err;
61355 + task_lock(task);
61356 + err = __ptrace_may_access(task, mode, 1);
61357 task_unlock(task);
61358 return !err;
61359 }
61360 @@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
61361 goto out;
61362
61363 task_lock(task);
61364 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61365 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61366 task_unlock(task);
61367 if (retval)
61368 goto unlock_creds;
61369 @@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
61370 goto unlock_tasklist;
61371
61372 task->ptrace = PT_PTRACED;
61373 - if (task_ns_capable(task, CAP_SYS_PTRACE))
61374 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
61375 task->ptrace |= PT_PTRACE_CAP;
61376
61377 __ptrace_link(task, current);
61378 @@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
61379 {
61380 int copied = 0;
61381
61382 + pax_track_stack();
61383 +
61384 while (len > 0) {
61385 char buf[128];
61386 int this_len, retval;
61387 @@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
61388 break;
61389 return -EIO;
61390 }
61391 - if (copy_to_user(dst, buf, retval))
61392 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
61393 return -EFAULT;
61394 copied += retval;
61395 src += retval;
61396 @@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
61397 {
61398 int copied = 0;
61399
61400 + pax_track_stack();
61401 +
61402 while (len > 0) {
61403 char buf[128];
61404 int this_len, retval;
61405 @@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
61406 {
61407 int ret = -EIO;
61408 siginfo_t siginfo;
61409 - void __user *datavp = (void __user *) data;
61410 + void __user *datavp = (__force void __user *) data;
61411 unsigned long __user *datalp = datavp;
61412
61413 + pax_track_stack();
61414 +
61415 switch (request) {
61416 case PTRACE_PEEKTEXT:
61417 case PTRACE_PEEKDATA:
61418 @@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
61419 goto out;
61420 }
61421
61422 + if (gr_handle_ptrace(child, request)) {
61423 + ret = -EPERM;
61424 + goto out_put_task_struct;
61425 + }
61426 +
61427 if (request == PTRACE_ATTACH) {
61428 ret = ptrace_attach(child);
61429 /*
61430 * Some architectures need to do book-keeping after
61431 * a ptrace attach.
61432 */
61433 - if (!ret)
61434 + if (!ret) {
61435 arch_ptrace_attach(child);
61436 + gr_audit_ptrace(child);
61437 + }
61438 goto out_put_task_struct;
61439 }
61440
61441 @@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
61442 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
61443 if (copied != sizeof(tmp))
61444 return -EIO;
61445 - return put_user(tmp, (unsigned long __user *)data);
61446 + return put_user(tmp, (__force unsigned long __user *)data);
61447 }
61448
61449 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
61450 @@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
61451 siginfo_t siginfo;
61452 int ret;
61453
61454 + pax_track_stack();
61455 +
61456 switch (request) {
61457 case PTRACE_PEEKTEXT:
61458 case PTRACE_PEEKDATA:
61459 @@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
61460 goto out;
61461 }
61462
61463 + if (gr_handle_ptrace(child, request)) {
61464 + ret = -EPERM;
61465 + goto out_put_task_struct;
61466 + }
61467 +
61468 if (request == PTRACE_ATTACH) {
61469 ret = ptrace_attach(child);
61470 /*
61471 * Some architectures need to do book-keeping after
61472 * a ptrace attach.
61473 */
61474 - if (!ret)
61475 + if (!ret) {
61476 arch_ptrace_attach(child);
61477 + gr_audit_ptrace(child);
61478 + }
61479 goto out_put_task_struct;
61480 }
61481
61482 diff -urNp linux-3.0.4/kernel/rcutorture.c linux-3.0.4/kernel/rcutorture.c
61483 --- linux-3.0.4/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
61484 +++ linux-3.0.4/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
61485 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
61486 { 0 };
61487 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
61488 { 0 };
61489 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61490 -static atomic_t n_rcu_torture_alloc;
61491 -static atomic_t n_rcu_torture_alloc_fail;
61492 -static atomic_t n_rcu_torture_free;
61493 -static atomic_t n_rcu_torture_mberror;
61494 -static atomic_t n_rcu_torture_error;
61495 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61496 +static atomic_unchecked_t n_rcu_torture_alloc;
61497 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
61498 +static atomic_unchecked_t n_rcu_torture_free;
61499 +static atomic_unchecked_t n_rcu_torture_mberror;
61500 +static atomic_unchecked_t n_rcu_torture_error;
61501 static long n_rcu_torture_boost_ktrerror;
61502 static long n_rcu_torture_boost_rterror;
61503 static long n_rcu_torture_boost_failure;
61504 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
61505
61506 spin_lock_bh(&rcu_torture_lock);
61507 if (list_empty(&rcu_torture_freelist)) {
61508 - atomic_inc(&n_rcu_torture_alloc_fail);
61509 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
61510 spin_unlock_bh(&rcu_torture_lock);
61511 return NULL;
61512 }
61513 - atomic_inc(&n_rcu_torture_alloc);
61514 + atomic_inc_unchecked(&n_rcu_torture_alloc);
61515 p = rcu_torture_freelist.next;
61516 list_del_init(p);
61517 spin_unlock_bh(&rcu_torture_lock);
61518 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
61519 static void
61520 rcu_torture_free(struct rcu_torture *p)
61521 {
61522 - atomic_inc(&n_rcu_torture_free);
61523 + atomic_inc_unchecked(&n_rcu_torture_free);
61524 spin_lock_bh(&rcu_torture_lock);
61525 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
61526 spin_unlock_bh(&rcu_torture_lock);
61527 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
61528 i = rp->rtort_pipe_count;
61529 if (i > RCU_TORTURE_PIPE_LEN)
61530 i = RCU_TORTURE_PIPE_LEN;
61531 - atomic_inc(&rcu_torture_wcount[i]);
61532 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61533 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61534 rp->rtort_mbtest = 0;
61535 rcu_torture_free(rp);
61536 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
61537 i = rp->rtort_pipe_count;
61538 if (i > RCU_TORTURE_PIPE_LEN)
61539 i = RCU_TORTURE_PIPE_LEN;
61540 - atomic_inc(&rcu_torture_wcount[i]);
61541 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61542 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61543 rp->rtort_mbtest = 0;
61544 list_del(&rp->rtort_free);
61545 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
61546 i = old_rp->rtort_pipe_count;
61547 if (i > RCU_TORTURE_PIPE_LEN)
61548 i = RCU_TORTURE_PIPE_LEN;
61549 - atomic_inc(&rcu_torture_wcount[i]);
61550 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61551 old_rp->rtort_pipe_count++;
61552 cur_ops->deferred_free(old_rp);
61553 }
61554 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
61555 return;
61556 }
61557 if (p->rtort_mbtest == 0)
61558 - atomic_inc(&n_rcu_torture_mberror);
61559 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61560 spin_lock(&rand_lock);
61561 cur_ops->read_delay(&rand);
61562 n_rcu_torture_timers++;
61563 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
61564 continue;
61565 }
61566 if (p->rtort_mbtest == 0)
61567 - atomic_inc(&n_rcu_torture_mberror);
61568 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61569 cur_ops->read_delay(&rand);
61570 preempt_disable();
61571 pipe_count = p->rtort_pipe_count;
61572 @@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
61573 rcu_torture_current,
61574 rcu_torture_current_version,
61575 list_empty(&rcu_torture_freelist),
61576 - atomic_read(&n_rcu_torture_alloc),
61577 - atomic_read(&n_rcu_torture_alloc_fail),
61578 - atomic_read(&n_rcu_torture_free),
61579 - atomic_read(&n_rcu_torture_mberror),
61580 + atomic_read_unchecked(&n_rcu_torture_alloc),
61581 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
61582 + atomic_read_unchecked(&n_rcu_torture_free),
61583 + atomic_read_unchecked(&n_rcu_torture_mberror),
61584 n_rcu_torture_boost_ktrerror,
61585 n_rcu_torture_boost_rterror,
61586 n_rcu_torture_boost_failure,
61587 n_rcu_torture_boosts,
61588 n_rcu_torture_timers);
61589 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
61590 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
61591 n_rcu_torture_boost_ktrerror != 0 ||
61592 n_rcu_torture_boost_rterror != 0 ||
61593 n_rcu_torture_boost_failure != 0)
61594 @@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
61595 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
61596 if (i > 1) {
61597 cnt += sprintf(&page[cnt], "!!! ");
61598 - atomic_inc(&n_rcu_torture_error);
61599 + atomic_inc_unchecked(&n_rcu_torture_error);
61600 WARN_ON_ONCE(1);
61601 }
61602 cnt += sprintf(&page[cnt], "Reader Pipe: ");
61603 @@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
61604 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
61605 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61606 cnt += sprintf(&page[cnt], " %d",
61607 - atomic_read(&rcu_torture_wcount[i]));
61608 + atomic_read_unchecked(&rcu_torture_wcount[i]));
61609 }
61610 cnt += sprintf(&page[cnt], "\n");
61611 if (cur_ops->stats)
61612 @@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
61613
61614 if (cur_ops->cleanup)
61615 cur_ops->cleanup();
61616 - if (atomic_read(&n_rcu_torture_error))
61617 + if (atomic_read_unchecked(&n_rcu_torture_error))
61618 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
61619 else
61620 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
61621 @@ -1476,17 +1476,17 @@ rcu_torture_init(void)
61622
61623 rcu_torture_current = NULL;
61624 rcu_torture_current_version = 0;
61625 - atomic_set(&n_rcu_torture_alloc, 0);
61626 - atomic_set(&n_rcu_torture_alloc_fail, 0);
61627 - atomic_set(&n_rcu_torture_free, 0);
61628 - atomic_set(&n_rcu_torture_mberror, 0);
61629 - atomic_set(&n_rcu_torture_error, 0);
61630 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
61631 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
61632 + atomic_set_unchecked(&n_rcu_torture_free, 0);
61633 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
61634 + atomic_set_unchecked(&n_rcu_torture_error, 0);
61635 n_rcu_torture_boost_ktrerror = 0;
61636 n_rcu_torture_boost_rterror = 0;
61637 n_rcu_torture_boost_failure = 0;
61638 n_rcu_torture_boosts = 0;
61639 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
61640 - atomic_set(&rcu_torture_wcount[i], 0);
61641 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
61642 for_each_possible_cpu(cpu) {
61643 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61644 per_cpu(rcu_torture_count, cpu)[i] = 0;
61645 diff -urNp linux-3.0.4/kernel/rcutree.c linux-3.0.4/kernel/rcutree.c
61646 --- linux-3.0.4/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
61647 +++ linux-3.0.4/kernel/rcutree.c 2011-08-23 21:47:56.000000000 -0400
61648 @@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
61649 /*
61650 * Do softirq processing for the current CPU.
61651 */
61652 -static void rcu_process_callbacks(struct softirq_action *unused)
61653 +static void rcu_process_callbacks(void)
61654 {
61655 __rcu_process_callbacks(&rcu_sched_state,
61656 &__get_cpu_var(rcu_sched_data));
61657 diff -urNp linux-3.0.4/kernel/rcutree_plugin.h linux-3.0.4/kernel/rcutree_plugin.h
61658 --- linux-3.0.4/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
61659 +++ linux-3.0.4/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
61660 @@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
61661
61662 /* Clean up and exit. */
61663 smp_mb(); /* ensure expedited GP seen before counter increment. */
61664 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
61665 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
61666 unlock_mb_ret:
61667 mutex_unlock(&sync_rcu_preempt_exp_mutex);
61668 mb_ret:
61669 @@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
61670
61671 #else /* #ifndef CONFIG_SMP */
61672
61673 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
61674 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
61675 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
61676 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
61677
61678 static int synchronize_sched_expedited_cpu_stop(void *data)
61679 {
61680 @@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
61681 int firstsnap, s, snap, trycount = 0;
61682
61683 /* Note that atomic_inc_return() implies full memory barrier. */
61684 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
61685 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
61686 get_online_cpus();
61687
61688 /*
61689 @@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
61690 }
61691
61692 /* Check to see if someone else did our work for us. */
61693 - s = atomic_read(&sync_sched_expedited_done);
61694 + s = atomic_read_unchecked(&sync_sched_expedited_done);
61695 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
61696 smp_mb(); /* ensure test happens before caller kfree */
61697 return;
61698 @@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
61699 * grace period works for us.
61700 */
61701 get_online_cpus();
61702 - snap = atomic_read(&sync_sched_expedited_started) - 1;
61703 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
61704 smp_mb(); /* ensure read is before try_stop_cpus(). */
61705 }
61706
61707 @@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
61708 * than we did beat us to the punch.
61709 */
61710 do {
61711 - s = atomic_read(&sync_sched_expedited_done);
61712 + s = atomic_read_unchecked(&sync_sched_expedited_done);
61713 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
61714 smp_mb(); /* ensure test happens before caller kfree */
61715 break;
61716 }
61717 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
61718 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
61719
61720 put_online_cpus();
61721 }
61722 diff -urNp linux-3.0.4/kernel/relay.c linux-3.0.4/kernel/relay.c
61723 --- linux-3.0.4/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
61724 +++ linux-3.0.4/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
61725 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
61726 };
61727 ssize_t ret;
61728
61729 + pax_track_stack();
61730 +
61731 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
61732 return 0;
61733 if (splice_grow_spd(pipe, &spd))
61734 diff -urNp linux-3.0.4/kernel/resource.c linux-3.0.4/kernel/resource.c
61735 --- linux-3.0.4/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
61736 +++ linux-3.0.4/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
61737 @@ -141,8 +141,18 @@ static const struct file_operations proc
61738
61739 static int __init ioresources_init(void)
61740 {
61741 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
61742 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61743 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
61744 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
61745 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61746 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
61747 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
61748 +#endif
61749 +#else
61750 proc_create("ioports", 0, NULL, &proc_ioports_operations);
61751 proc_create("iomem", 0, NULL, &proc_iomem_operations);
61752 +#endif
61753 return 0;
61754 }
61755 __initcall(ioresources_init);
61756 diff -urNp linux-3.0.4/kernel/rtmutex-tester.c linux-3.0.4/kernel/rtmutex-tester.c
61757 --- linux-3.0.4/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
61758 +++ linux-3.0.4/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
61759 @@ -20,7 +20,7 @@
61760 #define MAX_RT_TEST_MUTEXES 8
61761
61762 static spinlock_t rttest_lock;
61763 -static atomic_t rttest_event;
61764 +static atomic_unchecked_t rttest_event;
61765
61766 struct test_thread_data {
61767 int opcode;
61768 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
61769
61770 case RTTEST_LOCKCONT:
61771 td->mutexes[td->opdata] = 1;
61772 - td->event = atomic_add_return(1, &rttest_event);
61773 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61774 return 0;
61775
61776 case RTTEST_RESET:
61777 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
61778 return 0;
61779
61780 case RTTEST_RESETEVENT:
61781 - atomic_set(&rttest_event, 0);
61782 + atomic_set_unchecked(&rttest_event, 0);
61783 return 0;
61784
61785 default:
61786 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
61787 return ret;
61788
61789 td->mutexes[id] = 1;
61790 - td->event = atomic_add_return(1, &rttest_event);
61791 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61792 rt_mutex_lock(&mutexes[id]);
61793 - td->event = atomic_add_return(1, &rttest_event);
61794 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61795 td->mutexes[id] = 4;
61796 return 0;
61797
61798 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
61799 return ret;
61800
61801 td->mutexes[id] = 1;
61802 - td->event = atomic_add_return(1, &rttest_event);
61803 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61804 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
61805 - td->event = atomic_add_return(1, &rttest_event);
61806 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61807 td->mutexes[id] = ret ? 0 : 4;
61808 return ret ? -EINTR : 0;
61809
61810 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
61811 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
61812 return ret;
61813
61814 - td->event = atomic_add_return(1, &rttest_event);
61815 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61816 rt_mutex_unlock(&mutexes[id]);
61817 - td->event = atomic_add_return(1, &rttest_event);
61818 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61819 td->mutexes[id] = 0;
61820 return 0;
61821
61822 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
61823 break;
61824
61825 td->mutexes[dat] = 2;
61826 - td->event = atomic_add_return(1, &rttest_event);
61827 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61828 break;
61829
61830 default:
61831 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
61832 return;
61833
61834 td->mutexes[dat] = 3;
61835 - td->event = atomic_add_return(1, &rttest_event);
61836 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61837 break;
61838
61839 case RTTEST_LOCKNOWAIT:
61840 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
61841 return;
61842
61843 td->mutexes[dat] = 1;
61844 - td->event = atomic_add_return(1, &rttest_event);
61845 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61846 return;
61847
61848 default:
61849 diff -urNp linux-3.0.4/kernel/sched_autogroup.c linux-3.0.4/kernel/sched_autogroup.c
61850 --- linux-3.0.4/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
61851 +++ linux-3.0.4/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
61852 @@ -7,7 +7,7 @@
61853
61854 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
61855 static struct autogroup autogroup_default;
61856 -static atomic_t autogroup_seq_nr;
61857 +static atomic_unchecked_t autogroup_seq_nr;
61858
61859 static void __init autogroup_init(struct task_struct *init_task)
61860 {
61861 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
61862
61863 kref_init(&ag->kref);
61864 init_rwsem(&ag->lock);
61865 - ag->id = atomic_inc_return(&autogroup_seq_nr);
61866 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
61867 ag->tg = tg;
61868 #ifdef CONFIG_RT_GROUP_SCHED
61869 /*
61870 diff -urNp linux-3.0.4/kernel/sched.c linux-3.0.4/kernel/sched.c
61871 --- linux-3.0.4/kernel/sched.c 2011-07-21 22:17:23.000000000 -0400
61872 +++ linux-3.0.4/kernel/sched.c 2011-08-23 21:48:14.000000000 -0400
61873 @@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
61874 struct rq *rq;
61875 int cpu;
61876
61877 + pax_track_stack();
61878 +
61879 need_resched:
61880 preempt_disable();
61881 cpu = smp_processor_id();
61882 @@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
61883 /* convert nice value [19,-20] to rlimit style value [1,40] */
61884 int nice_rlim = 20 - nice;
61885
61886 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
61887 +
61888 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
61889 capable(CAP_SYS_NICE));
61890 }
61891 @@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
61892 if (nice > 19)
61893 nice = 19;
61894
61895 - if (increment < 0 && !can_nice(current, nice))
61896 + if (increment < 0 && (!can_nice(current, nice) ||
61897 + gr_handle_chroot_nice()))
61898 return -EPERM;
61899
61900 retval = security_task_setnice(current, nice);
61901 @@ -5111,6 +5116,7 @@ recheck:
61902 unsigned long rlim_rtprio =
61903 task_rlimit(p, RLIMIT_RTPRIO);
61904
61905 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
61906 /* can't set/change the rt policy */
61907 if (policy != p->policy && !rlim_rtprio)
61908 return -EPERM;
61909 diff -urNp linux-3.0.4/kernel/sched_fair.c linux-3.0.4/kernel/sched_fair.c
61910 --- linux-3.0.4/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
61911 +++ linux-3.0.4/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
61912 @@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
61913 * run_rebalance_domains is triggered when needed from the scheduler tick.
61914 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
61915 */
61916 -static void run_rebalance_domains(struct softirq_action *h)
61917 +static void run_rebalance_domains(void)
61918 {
61919 int this_cpu = smp_processor_id();
61920 struct rq *this_rq = cpu_rq(this_cpu);
61921 diff -urNp linux-3.0.4/kernel/signal.c linux-3.0.4/kernel/signal.c
61922 --- linux-3.0.4/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
61923 +++ linux-3.0.4/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
61924 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
61925
61926 int print_fatal_signals __read_mostly;
61927
61928 -static void __user *sig_handler(struct task_struct *t, int sig)
61929 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
61930 {
61931 return t->sighand->action[sig - 1].sa.sa_handler;
61932 }
61933
61934 -static int sig_handler_ignored(void __user *handler, int sig)
61935 +static int sig_handler_ignored(__sighandler_t handler, int sig)
61936 {
61937 /* Is it explicitly or implicitly ignored? */
61938 return handler == SIG_IGN ||
61939 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
61940 static int sig_task_ignored(struct task_struct *t, int sig,
61941 int from_ancestor_ns)
61942 {
61943 - void __user *handler;
61944 + __sighandler_t handler;
61945
61946 handler = sig_handler(t, sig);
61947
61948 @@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
61949 atomic_inc(&user->sigpending);
61950 rcu_read_unlock();
61951
61952 + if (!override_rlimit)
61953 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
61954 +
61955 if (override_rlimit ||
61956 atomic_read(&user->sigpending) <=
61957 task_rlimit(t, RLIMIT_SIGPENDING)) {
61958 @@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
61959
61960 int unhandled_signal(struct task_struct *tsk, int sig)
61961 {
61962 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
61963 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
61964 if (is_global_init(tsk))
61965 return 1;
61966 if (handler != SIG_IGN && handler != SIG_DFL)
61967 @@ -770,6 +773,13 @@ static int check_kill_permission(int sig
61968 }
61969 }
61970
61971 + /* allow glibc communication via tgkill to other threads in our
61972 + thread group */
61973 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
61974 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
61975 + && gr_handle_signal(t, sig))
61976 + return -EPERM;
61977 +
61978 return security_task_kill(t, info, sig, 0);
61979 }
61980
61981 @@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
61982 return send_signal(sig, info, p, 1);
61983 }
61984
61985 -static int
61986 +int
61987 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
61988 {
61989 return send_signal(sig, info, t, 0);
61990 @@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
61991 unsigned long int flags;
61992 int ret, blocked, ignored;
61993 struct k_sigaction *action;
61994 + int is_unhandled = 0;
61995
61996 spin_lock_irqsave(&t->sighand->siglock, flags);
61997 action = &t->sighand->action[sig-1];
61998 @@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
61999 }
62000 if (action->sa.sa_handler == SIG_DFL)
62001 t->signal->flags &= ~SIGNAL_UNKILLABLE;
62002 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
62003 + is_unhandled = 1;
62004 ret = specific_send_sig_info(sig, info, t);
62005 spin_unlock_irqrestore(&t->sighand->siglock, flags);
62006
62007 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
62008 + normal operation */
62009 + if (is_unhandled) {
62010 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
62011 + gr_handle_crash(t, sig);
62012 + }
62013 +
62014 return ret;
62015 }
62016
62017 @@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
62018 ret = check_kill_permission(sig, info, p);
62019 rcu_read_unlock();
62020
62021 - if (!ret && sig)
62022 + if (!ret && sig) {
62023 ret = do_send_sig_info(sig, info, p, true);
62024 + if (!ret)
62025 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
62026 + }
62027
62028 return ret;
62029 }
62030 @@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
62031 {
62032 siginfo_t info;
62033
62034 + pax_track_stack();
62035 +
62036 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
62037
62038 memset(&info, 0, sizeof info);
62039 @@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
62040 int error = -ESRCH;
62041
62042 rcu_read_lock();
62043 - p = find_task_by_vpid(pid);
62044 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62045 + /* allow glibc communication via tgkill to other threads in our
62046 + thread group */
62047 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
62048 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
62049 + p = find_task_by_vpid_unrestricted(pid);
62050 + else
62051 +#endif
62052 + p = find_task_by_vpid(pid);
62053 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
62054 error = check_kill_permission(sig, info, p);
62055 /*
62056 diff -urNp linux-3.0.4/kernel/smp.c linux-3.0.4/kernel/smp.c
62057 --- linux-3.0.4/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
62058 +++ linux-3.0.4/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
62059 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
62060 }
62061 EXPORT_SYMBOL(smp_call_function);
62062
62063 -void ipi_call_lock(void)
62064 +void ipi_call_lock(void) __acquires(call_function.lock)
62065 {
62066 raw_spin_lock(&call_function.lock);
62067 }
62068
62069 -void ipi_call_unlock(void)
62070 +void ipi_call_unlock(void) __releases(call_function.lock)
62071 {
62072 raw_spin_unlock(&call_function.lock);
62073 }
62074
62075 -void ipi_call_lock_irq(void)
62076 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
62077 {
62078 raw_spin_lock_irq(&call_function.lock);
62079 }
62080
62081 -void ipi_call_unlock_irq(void)
62082 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
62083 {
62084 raw_spin_unlock_irq(&call_function.lock);
62085 }
62086 diff -urNp linux-3.0.4/kernel/softirq.c linux-3.0.4/kernel/softirq.c
62087 --- linux-3.0.4/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
62088 +++ linux-3.0.4/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
62089 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
62090
62091 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62092
62093 -char *softirq_to_name[NR_SOFTIRQS] = {
62094 +const char * const softirq_to_name[NR_SOFTIRQS] = {
62095 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
62096 "TASKLET", "SCHED", "HRTIMER", "RCU"
62097 };
62098 @@ -235,7 +235,7 @@ restart:
62099 kstat_incr_softirqs_this_cpu(vec_nr);
62100
62101 trace_softirq_entry(vec_nr);
62102 - h->action(h);
62103 + h->action();
62104 trace_softirq_exit(vec_nr);
62105 if (unlikely(prev_count != preempt_count())) {
62106 printk(KERN_ERR "huh, entered softirq %u %s %p"
62107 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
62108 local_irq_restore(flags);
62109 }
62110
62111 -void open_softirq(int nr, void (*action)(struct softirq_action *))
62112 +void open_softirq(int nr, void (*action)(void))
62113 {
62114 - softirq_vec[nr].action = action;
62115 + pax_open_kernel();
62116 + *(void **)&softirq_vec[nr].action = action;
62117 + pax_close_kernel();
62118 }
62119
62120 /*
62121 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
62122
62123 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
62124
62125 -static void tasklet_action(struct softirq_action *a)
62126 +static void tasklet_action(void)
62127 {
62128 struct tasklet_struct *list;
62129
62130 @@ -476,7 +478,7 @@ static void tasklet_action(struct softir
62131 }
62132 }
62133
62134 -static void tasklet_hi_action(struct softirq_action *a)
62135 +static void tasklet_hi_action(void)
62136 {
62137 struct tasklet_struct *list;
62138
62139 diff -urNp linux-3.0.4/kernel/sys.c linux-3.0.4/kernel/sys.c
62140 --- linux-3.0.4/kernel/sys.c 2011-08-29 23:26:14.000000000 -0400
62141 +++ linux-3.0.4/kernel/sys.c 2011-08-29 23:26:27.000000000 -0400
62142 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
62143 error = -EACCES;
62144 goto out;
62145 }
62146 +
62147 + if (gr_handle_chroot_setpriority(p, niceval)) {
62148 + error = -EACCES;
62149 + goto out;
62150 + }
62151 +
62152 no_nice = security_task_setnice(p, niceval);
62153 if (no_nice) {
62154 error = no_nice;
62155 @@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
62156 goto error;
62157 }
62158
62159 + if (gr_check_group_change(new->gid, new->egid, -1))
62160 + goto error;
62161 +
62162 if (rgid != (gid_t) -1 ||
62163 (egid != (gid_t) -1 && egid != old->gid))
62164 new->sgid = new->egid;
62165 @@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
62166 old = current_cred();
62167
62168 retval = -EPERM;
62169 +
62170 + if (gr_check_group_change(gid, gid, gid))
62171 + goto error;
62172 +
62173 if (nsown_capable(CAP_SETGID))
62174 new->gid = new->egid = new->sgid = new->fsgid = gid;
62175 else if (gid == old->gid || gid == old->sgid)
62176 @@ -595,11 +608,18 @@ static int set_user(struct cred *new)
62177 if (!new_user)
62178 return -EAGAIN;
62179
62180 + /*
62181 + * We don't fail in case of NPROC limit excess here because too many
62182 + * poorly written programs don't check set*uid() return code, assuming
62183 + * it never fails if called by root. We may still enforce NPROC limit
62184 + * for programs doing set*uid()+execve() by harmlessly deferring the
62185 + * failure to the execve() stage.
62186 + */
62187 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
62188 - new_user != INIT_USER) {
62189 - free_uid(new_user);
62190 - return -EAGAIN;
62191 - }
62192 + new_user != INIT_USER)
62193 + current->flags |= PF_NPROC_EXCEEDED;
62194 + else
62195 + current->flags &= ~PF_NPROC_EXCEEDED;
62196
62197 free_uid(new->user);
62198 new->user = new_user;
62199 @@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
62200 goto error;
62201 }
62202
62203 + if (gr_check_user_change(new->uid, new->euid, -1))
62204 + goto error;
62205 +
62206 if (new->uid != old->uid) {
62207 retval = set_user(new);
62208 if (retval < 0)
62209 @@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
62210 old = current_cred();
62211
62212 retval = -EPERM;
62213 +
62214 + if (gr_check_crash_uid(uid))
62215 + goto error;
62216 + if (gr_check_user_change(uid, uid, uid))
62217 + goto error;
62218 +
62219 if (nsown_capable(CAP_SETUID)) {
62220 new->suid = new->uid = uid;
62221 if (uid != old->uid) {
62222 @@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
62223 goto error;
62224 }
62225
62226 + if (gr_check_user_change(ruid, euid, -1))
62227 + goto error;
62228 +
62229 if (ruid != (uid_t) -1) {
62230 new->uid = ruid;
62231 if (ruid != old->uid) {
62232 @@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
62233 goto error;
62234 }
62235
62236 + if (gr_check_group_change(rgid, egid, -1))
62237 + goto error;
62238 +
62239 if (rgid != (gid_t) -1)
62240 new->gid = rgid;
62241 if (egid != (gid_t) -1)
62242 @@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62243 old = current_cred();
62244 old_fsuid = old->fsuid;
62245
62246 + if (gr_check_user_change(-1, -1, uid))
62247 + goto error;
62248 +
62249 if (uid == old->uid || uid == old->euid ||
62250 uid == old->suid || uid == old->fsuid ||
62251 nsown_capable(CAP_SETUID)) {
62252 @@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62253 }
62254 }
62255
62256 +error:
62257 abort_creds(new);
62258 return old_fsuid;
62259
62260 @@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
62261 if (gid == old->gid || gid == old->egid ||
62262 gid == old->sgid || gid == old->fsgid ||
62263 nsown_capable(CAP_SETGID)) {
62264 + if (gr_check_group_change(-1, -1, gid))
62265 + goto error;
62266 +
62267 if (gid != old_fsgid) {
62268 new->fsgid = gid;
62269 goto change_okay;
62270 }
62271 }
62272
62273 +error:
62274 abort_creds(new);
62275 return old_fsgid;
62276
62277 @@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
62278 error = get_dumpable(me->mm);
62279 break;
62280 case PR_SET_DUMPABLE:
62281 - if (arg2 < 0 || arg2 > 1) {
62282 + if (arg2 > 1) {
62283 error = -EINVAL;
62284 break;
62285 }
62286 diff -urNp linux-3.0.4/kernel/sysctl.c linux-3.0.4/kernel/sysctl.c
62287 --- linux-3.0.4/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
62288 +++ linux-3.0.4/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
62289 @@ -85,6 +85,13 @@
62290
62291
62292 #if defined(CONFIG_SYSCTL)
62293 +#include <linux/grsecurity.h>
62294 +#include <linux/grinternal.h>
62295 +
62296 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62297 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62298 + const int op);
62299 +extern int gr_handle_chroot_sysctl(const int op);
62300
62301 /* External variables not in a header file. */
62302 extern int sysctl_overcommit_memory;
62303 @@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
62304 }
62305
62306 #endif
62307 +extern struct ctl_table grsecurity_table[];
62308
62309 static struct ctl_table root_table[];
62310 static struct ctl_table_root sysctl_table_root;
62311 @@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
62312 int sysctl_legacy_va_layout;
62313 #endif
62314
62315 +#ifdef CONFIG_PAX_SOFTMODE
62316 +static ctl_table pax_table[] = {
62317 + {
62318 + .procname = "softmode",
62319 + .data = &pax_softmode,
62320 + .maxlen = sizeof(unsigned int),
62321 + .mode = 0600,
62322 + .proc_handler = &proc_dointvec,
62323 + },
62324 +
62325 + { }
62326 +};
62327 +#endif
62328 +
62329 /* The default sysctl tables: */
62330
62331 static struct ctl_table root_table[] = {
62332 @@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
62333 #endif
62334
62335 static struct ctl_table kern_table[] = {
62336 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62337 + {
62338 + .procname = "grsecurity",
62339 + .mode = 0500,
62340 + .child = grsecurity_table,
62341 + },
62342 +#endif
62343 +
62344 +#ifdef CONFIG_PAX_SOFTMODE
62345 + {
62346 + .procname = "pax",
62347 + .mode = 0500,
62348 + .child = pax_table,
62349 + },
62350 +#endif
62351 +
62352 {
62353 .procname = "sched_child_runs_first",
62354 .data = &sysctl_sched_child_runs_first,
62355 @@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
62356 .data = &modprobe_path,
62357 .maxlen = KMOD_PATH_LEN,
62358 .mode = 0644,
62359 - .proc_handler = proc_dostring,
62360 + .proc_handler = proc_dostring_modpriv,
62361 },
62362 {
62363 .procname = "modules_disabled",
62364 @@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
62365 .extra1 = &zero,
62366 .extra2 = &one,
62367 },
62368 +#endif
62369 {
62370 .procname = "kptr_restrict",
62371 .data = &kptr_restrict,
62372 .maxlen = sizeof(int),
62373 .mode = 0644,
62374 .proc_handler = proc_dmesg_restrict,
62375 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62376 + .extra1 = &two,
62377 +#else
62378 .extra1 = &zero,
62379 +#endif
62380 .extra2 = &two,
62381 },
62382 -#endif
62383 {
62384 .procname = "ngroups_max",
62385 .data = &ngroups_max,
62386 @@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
62387 .proc_handler = proc_dointvec_minmax,
62388 .extra1 = &zero,
62389 },
62390 + {
62391 + .procname = "heap_stack_gap",
62392 + .data = &sysctl_heap_stack_gap,
62393 + .maxlen = sizeof(sysctl_heap_stack_gap),
62394 + .mode = 0644,
62395 + .proc_handler = proc_doulongvec_minmax,
62396 + },
62397 #else
62398 {
62399 .procname = "nr_trim_pages",
62400 @@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
62401 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
62402 {
62403 int mode;
62404 + int error;
62405 +
62406 + if (table->parent != NULL && table->parent->procname != NULL &&
62407 + table->procname != NULL &&
62408 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
62409 + return -EACCES;
62410 + if (gr_handle_chroot_sysctl(op))
62411 + return -EACCES;
62412 + error = gr_handle_sysctl(table, op);
62413 + if (error)
62414 + return error;
62415
62416 if (root->permissions)
62417 mode = root->permissions(root, current->nsproxy, table);
62418 @@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
62419 buffer, lenp, ppos);
62420 }
62421
62422 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62423 + void __user *buffer, size_t *lenp, loff_t *ppos)
62424 +{
62425 + if (write && !capable(CAP_SYS_MODULE))
62426 + return -EPERM;
62427 +
62428 + return _proc_do_string(table->data, table->maxlen, write,
62429 + buffer, lenp, ppos);
62430 +}
62431 +
62432 static size_t proc_skip_spaces(char **buf)
62433 {
62434 size_t ret;
62435 @@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
62436 len = strlen(tmp);
62437 if (len > *size)
62438 len = *size;
62439 + if (len > sizeof(tmp))
62440 + len = sizeof(tmp);
62441 if (copy_to_user(*buf, tmp, len))
62442 return -EFAULT;
62443 *size -= len;
62444 @@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
62445 *i = val;
62446 } else {
62447 val = convdiv * (*i) / convmul;
62448 - if (!first)
62449 + if (!first) {
62450 err = proc_put_char(&buffer, &left, '\t');
62451 + if (err)
62452 + break;
62453 + }
62454 err = proc_put_long(&buffer, &left, val, false);
62455 if (err)
62456 break;
62457 @@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
62458 return -ENOSYS;
62459 }
62460
62461 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62462 + void __user *buffer, size_t *lenp, loff_t *ppos)
62463 +{
62464 + return -ENOSYS;
62465 +}
62466 +
62467 int proc_dointvec(struct ctl_table *table, int write,
62468 void __user *buffer, size_t *lenp, loff_t *ppos)
62469 {
62470 @@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
62471 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
62472 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
62473 EXPORT_SYMBOL(proc_dostring);
62474 +EXPORT_SYMBOL(proc_dostring_modpriv);
62475 EXPORT_SYMBOL(proc_doulongvec_minmax);
62476 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
62477 EXPORT_SYMBOL(register_sysctl_table);
62478 diff -urNp linux-3.0.4/kernel/sysctl_check.c linux-3.0.4/kernel/sysctl_check.c
62479 --- linux-3.0.4/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
62480 +++ linux-3.0.4/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
62481 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
62482 set_fail(&fail, table, "Directory with extra2");
62483 } else {
62484 if ((table->proc_handler == proc_dostring) ||
62485 + (table->proc_handler == proc_dostring_modpriv) ||
62486 (table->proc_handler == proc_dointvec) ||
62487 (table->proc_handler == proc_dointvec_minmax) ||
62488 (table->proc_handler == proc_dointvec_jiffies) ||
62489 diff -urNp linux-3.0.4/kernel/taskstats.c linux-3.0.4/kernel/taskstats.c
62490 --- linux-3.0.4/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
62491 +++ linux-3.0.4/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
62492 @@ -27,9 +27,12 @@
62493 #include <linux/cgroup.h>
62494 #include <linux/fs.h>
62495 #include <linux/file.h>
62496 +#include <linux/grsecurity.h>
62497 #include <net/genetlink.h>
62498 #include <asm/atomic.h>
62499
62500 +extern int gr_is_taskstats_denied(int pid);
62501 +
62502 /*
62503 * Maximum length of a cpumask that can be specified in
62504 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
62505 @@ -558,6 +561,9 @@ err:
62506
62507 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
62508 {
62509 + if (gr_is_taskstats_denied(current->pid))
62510 + return -EACCES;
62511 +
62512 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
62513 return cmd_attr_register_cpumask(info);
62514 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
62515 diff -urNp linux-3.0.4/kernel/time/alarmtimer.c linux-3.0.4/kernel/time/alarmtimer.c
62516 --- linux-3.0.4/kernel/time/alarmtimer.c 2011-07-21 22:17:23.000000000 -0400
62517 +++ linux-3.0.4/kernel/time/alarmtimer.c 2011-08-23 21:47:56.000000000 -0400
62518 @@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
62519 {
62520 int error = 0;
62521 int i;
62522 - struct k_clock alarm_clock = {
62523 + static struct k_clock alarm_clock = {
62524 .clock_getres = alarm_clock_getres,
62525 .clock_get = alarm_clock_get,
62526 .timer_create = alarm_timer_create,
62527 diff -urNp linux-3.0.4/kernel/time/tick-broadcast.c linux-3.0.4/kernel/time/tick-broadcast.c
62528 --- linux-3.0.4/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
62529 +++ linux-3.0.4/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
62530 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
62531 * then clear the broadcast bit.
62532 */
62533 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
62534 - int cpu = smp_processor_id();
62535 + cpu = smp_processor_id();
62536
62537 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
62538 tick_broadcast_clear_oneshot(cpu);
62539 diff -urNp linux-3.0.4/kernel/time/timekeeping.c linux-3.0.4/kernel/time/timekeeping.c
62540 --- linux-3.0.4/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
62541 +++ linux-3.0.4/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
62542 @@ -14,6 +14,7 @@
62543 #include <linux/init.h>
62544 #include <linux/mm.h>
62545 #include <linux/sched.h>
62546 +#include <linux/grsecurity.h>
62547 #include <linux/syscore_ops.h>
62548 #include <linux/clocksource.h>
62549 #include <linux/jiffies.h>
62550 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
62551 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
62552 return -EINVAL;
62553
62554 + gr_log_timechange();
62555 +
62556 write_seqlock_irqsave(&xtime_lock, flags);
62557
62558 timekeeping_forward_now();
62559 diff -urNp linux-3.0.4/kernel/time/timer_list.c linux-3.0.4/kernel/time/timer_list.c
62560 --- linux-3.0.4/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
62561 +++ linux-3.0.4/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
62562 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
62563
62564 static void print_name_offset(struct seq_file *m, void *sym)
62565 {
62566 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62567 + SEQ_printf(m, "<%p>", NULL);
62568 +#else
62569 char symname[KSYM_NAME_LEN];
62570
62571 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
62572 SEQ_printf(m, "<%pK>", sym);
62573 else
62574 SEQ_printf(m, "%s", symname);
62575 +#endif
62576 }
62577
62578 static void
62579 @@ -112,7 +116,11 @@ next_one:
62580 static void
62581 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
62582 {
62583 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62584 + SEQ_printf(m, " .base: %p\n", NULL);
62585 +#else
62586 SEQ_printf(m, " .base: %pK\n", base);
62587 +#endif
62588 SEQ_printf(m, " .index: %d\n",
62589 base->index);
62590 SEQ_printf(m, " .resolution: %Lu nsecs\n",
62591 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
62592 {
62593 struct proc_dir_entry *pe;
62594
62595 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62596 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
62597 +#else
62598 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
62599 +#endif
62600 if (!pe)
62601 return -ENOMEM;
62602 return 0;
62603 diff -urNp linux-3.0.4/kernel/time/timer_stats.c linux-3.0.4/kernel/time/timer_stats.c
62604 --- linux-3.0.4/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
62605 +++ linux-3.0.4/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
62606 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
62607 static unsigned long nr_entries;
62608 static struct entry entries[MAX_ENTRIES];
62609
62610 -static atomic_t overflow_count;
62611 +static atomic_unchecked_t overflow_count;
62612
62613 /*
62614 * The entries are in a hash-table, for fast lookup:
62615 @@ -140,7 +140,7 @@ static void reset_entries(void)
62616 nr_entries = 0;
62617 memset(entries, 0, sizeof(entries));
62618 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
62619 - atomic_set(&overflow_count, 0);
62620 + atomic_set_unchecked(&overflow_count, 0);
62621 }
62622
62623 static struct entry *alloc_entry(void)
62624 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
62625 if (likely(entry))
62626 entry->count++;
62627 else
62628 - atomic_inc(&overflow_count);
62629 + atomic_inc_unchecked(&overflow_count);
62630
62631 out_unlock:
62632 raw_spin_unlock_irqrestore(lock, flags);
62633 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
62634
62635 static void print_name_offset(struct seq_file *m, unsigned long addr)
62636 {
62637 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62638 + seq_printf(m, "<%p>", NULL);
62639 +#else
62640 char symname[KSYM_NAME_LEN];
62641
62642 if (lookup_symbol_name(addr, symname) < 0)
62643 seq_printf(m, "<%p>", (void *)addr);
62644 else
62645 seq_printf(m, "%s", symname);
62646 +#endif
62647 }
62648
62649 static int tstats_show(struct seq_file *m, void *v)
62650 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
62651
62652 seq_puts(m, "Timer Stats Version: v0.2\n");
62653 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
62654 - if (atomic_read(&overflow_count))
62655 + if (atomic_read_unchecked(&overflow_count))
62656 seq_printf(m, "Overflow: %d entries\n",
62657 - atomic_read(&overflow_count));
62658 + atomic_read_unchecked(&overflow_count));
62659
62660 for (i = 0; i < nr_entries; i++) {
62661 entry = entries + i;
62662 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
62663 {
62664 struct proc_dir_entry *pe;
62665
62666 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62667 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
62668 +#else
62669 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
62670 +#endif
62671 if (!pe)
62672 return -ENOMEM;
62673 return 0;
62674 diff -urNp linux-3.0.4/kernel/time.c linux-3.0.4/kernel/time.c
62675 --- linux-3.0.4/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
62676 +++ linux-3.0.4/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
62677 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
62678 return error;
62679
62680 if (tz) {
62681 + /* we log in do_settimeofday called below, so don't log twice
62682 + */
62683 + if (!tv)
62684 + gr_log_timechange();
62685 +
62686 /* SMP safe, global irq locking makes it work. */
62687 sys_tz = *tz;
62688 update_vsyscall_tz();
62689 diff -urNp linux-3.0.4/kernel/timer.c linux-3.0.4/kernel/timer.c
62690 --- linux-3.0.4/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
62691 +++ linux-3.0.4/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
62692 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
62693 /*
62694 * This function runs timers and the timer-tq in bottom half context.
62695 */
62696 -static void run_timer_softirq(struct softirq_action *h)
62697 +static void run_timer_softirq(void)
62698 {
62699 struct tvec_base *base = __this_cpu_read(tvec_bases);
62700
62701 diff -urNp linux-3.0.4/kernel/trace/blktrace.c linux-3.0.4/kernel/trace/blktrace.c
62702 --- linux-3.0.4/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
62703 +++ linux-3.0.4/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
62704 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
62705 struct blk_trace *bt = filp->private_data;
62706 char buf[16];
62707
62708 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
62709 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
62710
62711 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
62712 }
62713 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
62714 return 1;
62715
62716 bt = buf->chan->private_data;
62717 - atomic_inc(&bt->dropped);
62718 + atomic_inc_unchecked(&bt->dropped);
62719 return 0;
62720 }
62721
62722 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
62723
62724 bt->dir = dir;
62725 bt->dev = dev;
62726 - atomic_set(&bt->dropped, 0);
62727 + atomic_set_unchecked(&bt->dropped, 0);
62728
62729 ret = -EIO;
62730 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
62731 diff -urNp linux-3.0.4/kernel/trace/ftrace.c linux-3.0.4/kernel/trace/ftrace.c
62732 --- linux-3.0.4/kernel/trace/ftrace.c 2011-07-21 22:17:23.000000000 -0400
62733 +++ linux-3.0.4/kernel/trace/ftrace.c 2011-08-23 21:47:56.000000000 -0400
62734 @@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
62735 if (unlikely(ftrace_disabled))
62736 return 0;
62737
62738 + ret = ftrace_arch_code_modify_prepare();
62739 + FTRACE_WARN_ON(ret);
62740 + if (ret)
62741 + return 0;
62742 +
62743 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
62744 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
62745 if (ret) {
62746 ftrace_bug(ret, ip);
62747 - return 0;
62748 }
62749 - return 1;
62750 + return ret ? 0 : 1;
62751 }
62752
62753 /*
62754 @@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
62755
62756 int
62757 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
62758 - void *data)
62759 + void *data)
62760 {
62761 struct ftrace_func_probe *entry;
62762 struct ftrace_page *pg;
62763 diff -urNp linux-3.0.4/kernel/trace/trace.c linux-3.0.4/kernel/trace/trace.c
62764 --- linux-3.0.4/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
62765 +++ linux-3.0.4/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
62766 @@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
62767 size_t rem;
62768 unsigned int i;
62769
62770 + pax_track_stack();
62771 +
62772 if (splice_grow_spd(pipe, &spd))
62773 return -ENOMEM;
62774
62775 @@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
62776 int entries, size, i;
62777 size_t ret;
62778
62779 + pax_track_stack();
62780 +
62781 if (splice_grow_spd(pipe, &spd))
62782 return -ENOMEM;
62783
62784 @@ -3990,10 +3994,9 @@ static const struct file_operations trac
62785 };
62786 #endif
62787
62788 -static struct dentry *d_tracer;
62789 -
62790 struct dentry *tracing_init_dentry(void)
62791 {
62792 + static struct dentry *d_tracer;
62793 static int once;
62794
62795 if (d_tracer)
62796 @@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
62797 return d_tracer;
62798 }
62799
62800 -static struct dentry *d_percpu;
62801 -
62802 struct dentry *tracing_dentry_percpu(void)
62803 {
62804 + static struct dentry *d_percpu;
62805 static int once;
62806 struct dentry *d_tracer;
62807
62808 diff -urNp linux-3.0.4/kernel/trace/trace_events.c linux-3.0.4/kernel/trace/trace_events.c
62809 --- linux-3.0.4/kernel/trace/trace_events.c 2011-08-23 21:44:40.000000000 -0400
62810 +++ linux-3.0.4/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
62811 @@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
62812 struct ftrace_module_file_ops {
62813 struct list_head list;
62814 struct module *mod;
62815 - struct file_operations id;
62816 - struct file_operations enable;
62817 - struct file_operations format;
62818 - struct file_operations filter;
62819 };
62820
62821 static struct ftrace_module_file_ops *
62822 @@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
62823
62824 file_ops->mod = mod;
62825
62826 - file_ops->id = ftrace_event_id_fops;
62827 - file_ops->id.owner = mod;
62828 -
62829 - file_ops->enable = ftrace_enable_fops;
62830 - file_ops->enable.owner = mod;
62831 -
62832 - file_ops->filter = ftrace_event_filter_fops;
62833 - file_ops->filter.owner = mod;
62834 -
62835 - file_ops->format = ftrace_event_format_fops;
62836 - file_ops->format.owner = mod;
62837 + pax_open_kernel();
62838 + *(void **)&mod->trace_id.owner = mod;
62839 + *(void **)&mod->trace_enable.owner = mod;
62840 + *(void **)&mod->trace_filter.owner = mod;
62841 + *(void **)&mod->trace_format.owner = mod;
62842 + pax_close_kernel();
62843
62844 list_add(&file_ops->list, &ftrace_module_file_list);
62845
62846 @@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
62847
62848 for_each_event(call, start, end) {
62849 __trace_add_event_call(*call, mod,
62850 - &file_ops->id, &file_ops->enable,
62851 - &file_ops->filter, &file_ops->format);
62852 + &mod->trace_id, &mod->trace_enable,
62853 + &mod->trace_filter, &mod->trace_format);
62854 }
62855 }
62856
62857 diff -urNp linux-3.0.4/kernel/trace/trace_mmiotrace.c linux-3.0.4/kernel/trace/trace_mmiotrace.c
62858 --- linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
62859 +++ linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
62860 @@ -24,7 +24,7 @@ struct header_iter {
62861 static struct trace_array *mmio_trace_array;
62862 static bool overrun_detected;
62863 static unsigned long prev_overruns;
62864 -static atomic_t dropped_count;
62865 +static atomic_unchecked_t dropped_count;
62866
62867 static void mmio_reset_data(struct trace_array *tr)
62868 {
62869 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
62870
62871 static unsigned long count_overruns(struct trace_iterator *iter)
62872 {
62873 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
62874 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
62875 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
62876
62877 if (over > prev_overruns)
62878 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
62879 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
62880 sizeof(*entry), 0, pc);
62881 if (!event) {
62882 - atomic_inc(&dropped_count);
62883 + atomic_inc_unchecked(&dropped_count);
62884 return;
62885 }
62886 entry = ring_buffer_event_data(event);
62887 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
62888 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
62889 sizeof(*entry), 0, pc);
62890 if (!event) {
62891 - atomic_inc(&dropped_count);
62892 + atomic_inc_unchecked(&dropped_count);
62893 return;
62894 }
62895 entry = ring_buffer_event_data(event);
62896 diff -urNp linux-3.0.4/kernel/trace/trace_output.c linux-3.0.4/kernel/trace/trace_output.c
62897 --- linux-3.0.4/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
62898 +++ linux-3.0.4/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
62899 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
62900
62901 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
62902 if (!IS_ERR(p)) {
62903 - p = mangle_path(s->buffer + s->len, p, "\n");
62904 + p = mangle_path(s->buffer + s->len, p, "\n\\");
62905 if (p) {
62906 s->len = p - s->buffer;
62907 return 1;
62908 diff -urNp linux-3.0.4/kernel/trace/trace_stack.c linux-3.0.4/kernel/trace/trace_stack.c
62909 --- linux-3.0.4/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
62910 +++ linux-3.0.4/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
62911 @@ -50,7 +50,7 @@ static inline void check_stack(void)
62912 return;
62913
62914 /* we do not handle interrupt stacks yet */
62915 - if (!object_is_on_stack(&this_size))
62916 + if (!object_starts_on_stack(&this_size))
62917 return;
62918
62919 local_irq_save(flags);
62920 diff -urNp linux-3.0.4/kernel/trace/trace_workqueue.c linux-3.0.4/kernel/trace/trace_workqueue.c
62921 --- linux-3.0.4/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
62922 +++ linux-3.0.4/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
62923 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
62924 int cpu;
62925 pid_t pid;
62926 /* Can be inserted from interrupt or user context, need to be atomic */
62927 - atomic_t inserted;
62928 + atomic_unchecked_t inserted;
62929 /*
62930 * Don't need to be atomic, works are serialized in a single workqueue thread
62931 * on a single CPU.
62932 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
62933 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
62934 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
62935 if (node->pid == wq_thread->pid) {
62936 - atomic_inc(&node->inserted);
62937 + atomic_inc_unchecked(&node->inserted);
62938 goto found;
62939 }
62940 }
62941 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
62942 tsk = get_pid_task(pid, PIDTYPE_PID);
62943 if (tsk) {
62944 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
62945 - atomic_read(&cws->inserted), cws->executed,
62946 + atomic_read_unchecked(&cws->inserted), cws->executed,
62947 tsk->comm);
62948 put_task_struct(tsk);
62949 }
62950 diff -urNp linux-3.0.4/lib/bug.c linux-3.0.4/lib/bug.c
62951 --- linux-3.0.4/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
62952 +++ linux-3.0.4/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
62953 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
62954 return BUG_TRAP_TYPE_NONE;
62955
62956 bug = find_bug(bugaddr);
62957 + if (!bug)
62958 + return BUG_TRAP_TYPE_NONE;
62959
62960 file = NULL;
62961 line = 0;
62962 diff -urNp linux-3.0.4/lib/debugobjects.c linux-3.0.4/lib/debugobjects.c
62963 --- linux-3.0.4/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
62964 +++ linux-3.0.4/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
62965 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
62966 if (limit > 4)
62967 return;
62968
62969 - is_on_stack = object_is_on_stack(addr);
62970 + is_on_stack = object_starts_on_stack(addr);
62971 if (is_on_stack == onstack)
62972 return;
62973
62974 diff -urNp linux-3.0.4/lib/dma-debug.c linux-3.0.4/lib/dma-debug.c
62975 --- linux-3.0.4/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
62976 +++ linux-3.0.4/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
62977 @@ -870,7 +870,7 @@ out:
62978
62979 static void check_for_stack(struct device *dev, void *addr)
62980 {
62981 - if (object_is_on_stack(addr))
62982 + if (object_starts_on_stack(addr))
62983 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
62984 "stack [addr=%p]\n", addr);
62985 }
62986 diff -urNp linux-3.0.4/lib/extable.c linux-3.0.4/lib/extable.c
62987 --- linux-3.0.4/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
62988 +++ linux-3.0.4/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
62989 @@ -13,6 +13,7 @@
62990 #include <linux/init.h>
62991 #include <linux/sort.h>
62992 #include <asm/uaccess.h>
62993 +#include <asm/pgtable.h>
62994
62995 #ifndef ARCH_HAS_SORT_EXTABLE
62996 /*
62997 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
62998 void sort_extable(struct exception_table_entry *start,
62999 struct exception_table_entry *finish)
63000 {
63001 + pax_open_kernel();
63002 sort(start, finish - start, sizeof(struct exception_table_entry),
63003 cmp_ex, NULL);
63004 + pax_close_kernel();
63005 }
63006
63007 #ifdef CONFIG_MODULES
63008 diff -urNp linux-3.0.4/lib/inflate.c linux-3.0.4/lib/inflate.c
63009 --- linux-3.0.4/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
63010 +++ linux-3.0.4/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
63011 @@ -269,7 +269,7 @@ static void free(void *where)
63012 malloc_ptr = free_mem_ptr;
63013 }
63014 #else
63015 -#define malloc(a) kmalloc(a, GFP_KERNEL)
63016 +#define malloc(a) kmalloc((a), GFP_KERNEL)
63017 #define free(a) kfree(a)
63018 #endif
63019
63020 diff -urNp linux-3.0.4/lib/Kconfig.debug linux-3.0.4/lib/Kconfig.debug
63021 --- linux-3.0.4/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
63022 +++ linux-3.0.4/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
63023 @@ -1088,6 +1088,7 @@ config LATENCYTOP
63024 depends on DEBUG_KERNEL
63025 depends on STACKTRACE_SUPPORT
63026 depends on PROC_FS
63027 + depends on !GRKERNSEC_HIDESYM
63028 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
63029 select KALLSYMS
63030 select KALLSYMS_ALL
63031 diff -urNp linux-3.0.4/lib/kref.c linux-3.0.4/lib/kref.c
63032 --- linux-3.0.4/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
63033 +++ linux-3.0.4/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
63034 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
63035 */
63036 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
63037 {
63038 - WARN_ON(release == NULL);
63039 + BUG_ON(release == NULL);
63040 WARN_ON(release == (void (*)(struct kref *))kfree);
63041
63042 if (atomic_dec_and_test(&kref->refcount)) {
63043 diff -urNp linux-3.0.4/lib/radix-tree.c linux-3.0.4/lib/radix-tree.c
63044 --- linux-3.0.4/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
63045 +++ linux-3.0.4/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
63046 @@ -80,7 +80,7 @@ struct radix_tree_preload {
63047 int nr;
63048 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
63049 };
63050 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
63051 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
63052
63053 static inline void *ptr_to_indirect(void *ptr)
63054 {
63055 diff -urNp linux-3.0.4/lib/vsprintf.c linux-3.0.4/lib/vsprintf.c
63056 --- linux-3.0.4/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
63057 +++ linux-3.0.4/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
63058 @@ -16,6 +16,9 @@
63059 * - scnprintf and vscnprintf
63060 */
63061
63062 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63063 +#define __INCLUDED_BY_HIDESYM 1
63064 +#endif
63065 #include <stdarg.h>
63066 #include <linux/module.h>
63067 #include <linux/types.h>
63068 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
63069 char sym[KSYM_SYMBOL_LEN];
63070 if (ext == 'B')
63071 sprint_backtrace(sym, value);
63072 - else if (ext != 'f' && ext != 's')
63073 + else if (ext != 'f' && ext != 's' && ext != 'a')
63074 sprint_symbol(sym, value);
63075 else
63076 kallsyms_lookup(value, NULL, NULL, NULL, sym);
63077 @@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
63078 return string(buf, end, uuid, spec);
63079 }
63080
63081 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63082 +int kptr_restrict __read_mostly = 2;
63083 +#else
63084 int kptr_restrict __read_mostly;
63085 +#endif
63086
63087 /*
63088 * Show a '%p' thing. A kernel extension is that the '%p' is followed
63089 @@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
63090 * - 'S' For symbolic direct pointers with offset
63091 * - 's' For symbolic direct pointers without offset
63092 * - 'B' For backtraced symbolic direct pointers with offset
63093 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
63094 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
63095 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
63096 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
63097 * - 'M' For a 6-byte MAC address, it prints the address in the
63098 @@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
63099 {
63100 if (!ptr && *fmt != 'K') {
63101 /*
63102 - * Print (null) with the same width as a pointer so it makes
63103 + * Print (nil) with the same width as a pointer so it makes
63104 * tabular output look nice.
63105 */
63106 if (spec.field_width == -1)
63107 spec.field_width = 2 * sizeof(void *);
63108 - return string(buf, end, "(null)", spec);
63109 + return string(buf, end, "(nil)", spec);
63110 }
63111
63112 switch (*fmt) {
63113 @@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
63114 /* Fallthrough */
63115 case 'S':
63116 case 's':
63117 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63118 + break;
63119 +#else
63120 + return symbol_string(buf, end, ptr, spec, *fmt);
63121 +#endif
63122 + case 'A':
63123 + case 'a':
63124 case 'B':
63125 return symbol_string(buf, end, ptr, spec, *fmt);
63126 case 'R':
63127 @@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
63128 typeof(type) value; \
63129 if (sizeof(type) == 8) { \
63130 args = PTR_ALIGN(args, sizeof(u32)); \
63131 - *(u32 *)&value = *(u32 *)args; \
63132 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
63133 + *(u32 *)&value = *(const u32 *)args; \
63134 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
63135 } else { \
63136 args = PTR_ALIGN(args, sizeof(type)); \
63137 - value = *(typeof(type) *)args; \
63138 + value = *(const typeof(type) *)args; \
63139 } \
63140 args += sizeof(type); \
63141 value; \
63142 @@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
63143 case FORMAT_TYPE_STR: {
63144 const char *str_arg = args;
63145 args += strlen(str_arg) + 1;
63146 - str = string(str, end, (char *)str_arg, spec);
63147 + str = string(str, end, str_arg, spec);
63148 break;
63149 }
63150
63151 diff -urNp linux-3.0.4/localversion-grsec linux-3.0.4/localversion-grsec
63152 --- linux-3.0.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
63153 +++ linux-3.0.4/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
63154 @@ -0,0 +1 @@
63155 +-grsec
63156 diff -urNp linux-3.0.4/Makefile linux-3.0.4/Makefile
63157 --- linux-3.0.4/Makefile 2011-08-29 23:26:13.000000000 -0400
63158 +++ linux-3.0.4/Makefile 2011-08-29 23:26:21.000000000 -0400
63159 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
63160
63161 HOSTCC = gcc
63162 HOSTCXX = g++
63163 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
63164 -HOSTCXXFLAGS = -O2
63165 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
63166 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
63167 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
63168
63169 # Decide whether to build built-in, modular, or both.
63170 # Normally, just do built-in.
63171 @@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
63172 KBUILD_CPPFLAGS := -D__KERNEL__
63173
63174 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
63175 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
63176 -fno-strict-aliasing -fno-common \
63177 -Werror-implicit-function-declaration \
63178 -Wno-format-security \
63179 -fno-delete-null-pointer-checks
63180 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
63181 KBUILD_AFLAGS_KERNEL :=
63182 KBUILD_CFLAGS_KERNEL :=
63183 KBUILD_AFLAGS := -D__ASSEMBLY__
63184 @@ -564,6 +567,25 @@ else
63185 KBUILD_CFLAGS += -O2
63186 endif
63187
63188 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
63189 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
63190 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
63191 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
63192 +endif
63193 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
63194 +gcc-plugins0:
63195 + $(Q)$(MAKE) $(build)=tools/gcc
63196 +gcc-plugins: scripts_basic gcc-plugins0
63197 +else
63198 +gcc-plugins:
63199 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
63200 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
63201 +else
63202 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
63203 +endif
63204 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
63205 +endif
63206 +
63207 include $(srctree)/arch/$(SRCARCH)/Makefile
63208
63209 ifneq ($(CONFIG_FRAME_WARN),0)
63210 @@ -708,7 +730,7 @@ export mod_strip_cmd
63211
63212
63213 ifeq ($(KBUILD_EXTMOD),)
63214 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63215 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63216
63217 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63218 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63219 @@ -907,6 +929,7 @@ define rule_vmlinux-modpost
63220 endef
63221
63222 # vmlinux image - including updated kernel symbols
63223 +vmlinux: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
63224 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
63225 ifdef CONFIG_HEADERS_CHECK
63226 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
63227 @@ -973,7 +996,7 @@ ifneq ($(KBUILD_SRC),)
63228 endif
63229
63230 # prepare2 creates a makefile if using a separate output directory
63231 -prepare2: prepare3 outputmakefile asm-generic
63232 +prepare2: prepare3 outputmakefile asm-generic gcc-plugins
63233
63234 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
63235 include/config/auto.conf
63236 @@ -1087,6 +1110,7 @@ all: modules
63237 # using awk while concatenating to the final file.
63238
63239 PHONY += modules
63240 +modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
63241 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
63242 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
63243 @$(kecho) ' Building modules, stage 2.';
63244 @@ -1359,6 +1383,7 @@ PHONY += $(module-dirs) modules
63245 $(module-dirs): crmodverdir $(objtree)/Module.symvers
63246 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
63247
63248 +modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
63249 modules: $(module-dirs)
63250 @$(kecho) ' Building modules, stage 2.';
63251 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
63252 @@ -1404,7 +1429,7 @@ clean: $(clean-dirs)
63253 $(call cmd,rmdirs)
63254 $(call cmd,rmfiles)
63255 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
63256 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
63257 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
63258 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
63259 -o -name '*.symtypes' -o -name 'modules.order' \
63260 -o -name modules.builtin -o -name '.tmp_*.o.*' \
63261 diff -urNp linux-3.0.4/mm/filemap.c linux-3.0.4/mm/filemap.c
63262 --- linux-3.0.4/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
63263 +++ linux-3.0.4/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
63264 @@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
63265 struct address_space *mapping = file->f_mapping;
63266
63267 if (!mapping->a_ops->readpage)
63268 - return -ENOEXEC;
63269 + return -ENODEV;
63270 file_accessed(file);
63271 vma->vm_ops = &generic_file_vm_ops;
63272 vma->vm_flags |= VM_CAN_NONLINEAR;
63273 @@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
63274 *pos = i_size_read(inode);
63275
63276 if (limit != RLIM_INFINITY) {
63277 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
63278 if (*pos >= limit) {
63279 send_sig(SIGXFSZ, current, 0);
63280 return -EFBIG;
63281 diff -urNp linux-3.0.4/mm/fremap.c linux-3.0.4/mm/fremap.c
63282 --- linux-3.0.4/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
63283 +++ linux-3.0.4/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
63284 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63285 retry:
63286 vma = find_vma(mm, start);
63287
63288 +#ifdef CONFIG_PAX_SEGMEXEC
63289 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
63290 + goto out;
63291 +#endif
63292 +
63293 /*
63294 * Make sure the vma is shared, that it supports prefaulting,
63295 * and that the remapped range is valid and fully within
63296 diff -urNp linux-3.0.4/mm/highmem.c linux-3.0.4/mm/highmem.c
63297 --- linux-3.0.4/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
63298 +++ linux-3.0.4/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
63299 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
63300 * So no dangers, even with speculative execution.
63301 */
63302 page = pte_page(pkmap_page_table[i]);
63303 + pax_open_kernel();
63304 pte_clear(&init_mm, (unsigned long)page_address(page),
63305 &pkmap_page_table[i]);
63306 -
63307 + pax_close_kernel();
63308 set_page_address(page, NULL);
63309 need_flush = 1;
63310 }
63311 @@ -186,9 +187,11 @@ start:
63312 }
63313 }
63314 vaddr = PKMAP_ADDR(last_pkmap_nr);
63315 +
63316 + pax_open_kernel();
63317 set_pte_at(&init_mm, vaddr,
63318 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
63319 -
63320 + pax_close_kernel();
63321 pkmap_count[last_pkmap_nr] = 1;
63322 set_page_address(page, (void *)vaddr);
63323
63324 diff -urNp linux-3.0.4/mm/huge_memory.c linux-3.0.4/mm/huge_memory.c
63325 --- linux-3.0.4/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
63326 +++ linux-3.0.4/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
63327 @@ -702,7 +702,7 @@ out:
63328 * run pte_offset_map on the pmd, if an huge pmd could
63329 * materialize from under us from a different thread.
63330 */
63331 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
63332 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
63333 return VM_FAULT_OOM;
63334 /* if an huge pmd materialized from under us just retry later */
63335 if (unlikely(pmd_trans_huge(*pmd)))
63336 diff -urNp linux-3.0.4/mm/hugetlb.c linux-3.0.4/mm/hugetlb.c
63337 --- linux-3.0.4/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
63338 +++ linux-3.0.4/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
63339 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
63340 return 1;
63341 }
63342
63343 +#ifdef CONFIG_PAX_SEGMEXEC
63344 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
63345 +{
63346 + struct mm_struct *mm = vma->vm_mm;
63347 + struct vm_area_struct *vma_m;
63348 + unsigned long address_m;
63349 + pte_t *ptep_m;
63350 +
63351 + vma_m = pax_find_mirror_vma(vma);
63352 + if (!vma_m)
63353 + return;
63354 +
63355 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63356 + address_m = address + SEGMEXEC_TASK_SIZE;
63357 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
63358 + get_page(page_m);
63359 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
63360 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
63361 +}
63362 +#endif
63363 +
63364 /*
63365 * Hugetlb_cow() should be called with page lock of the original hugepage held.
63366 */
63367 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
63368 make_huge_pte(vma, new_page, 1));
63369 page_remove_rmap(old_page);
63370 hugepage_add_new_anon_rmap(new_page, vma, address);
63371 +
63372 +#ifdef CONFIG_PAX_SEGMEXEC
63373 + pax_mirror_huge_pte(vma, address, new_page);
63374 +#endif
63375 +
63376 /* Make the old page be freed below */
63377 new_page = old_page;
63378 mmu_notifier_invalidate_range_end(mm,
63379 @@ -2591,6 +2617,10 @@ retry:
63380 && (vma->vm_flags & VM_SHARED)));
63381 set_huge_pte_at(mm, address, ptep, new_pte);
63382
63383 +#ifdef CONFIG_PAX_SEGMEXEC
63384 + pax_mirror_huge_pte(vma, address, page);
63385 +#endif
63386 +
63387 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
63388 /* Optimization, do the COW without a second fault */
63389 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
63390 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
63391 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
63392 struct hstate *h = hstate_vma(vma);
63393
63394 +#ifdef CONFIG_PAX_SEGMEXEC
63395 + struct vm_area_struct *vma_m;
63396 +#endif
63397 +
63398 ptep = huge_pte_offset(mm, address);
63399 if (ptep) {
63400 entry = huge_ptep_get(ptep);
63401 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
63402 VM_FAULT_SET_HINDEX(h - hstates);
63403 }
63404
63405 +#ifdef CONFIG_PAX_SEGMEXEC
63406 + vma_m = pax_find_mirror_vma(vma);
63407 + if (vma_m) {
63408 + unsigned long address_m;
63409 +
63410 + if (vma->vm_start > vma_m->vm_start) {
63411 + address_m = address;
63412 + address -= SEGMEXEC_TASK_SIZE;
63413 + vma = vma_m;
63414 + h = hstate_vma(vma);
63415 + } else
63416 + address_m = address + SEGMEXEC_TASK_SIZE;
63417 +
63418 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
63419 + return VM_FAULT_OOM;
63420 + address_m &= HPAGE_MASK;
63421 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
63422 + }
63423 +#endif
63424 +
63425 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
63426 if (!ptep)
63427 return VM_FAULT_OOM;
63428 diff -urNp linux-3.0.4/mm/internal.h linux-3.0.4/mm/internal.h
63429 --- linux-3.0.4/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
63430 +++ linux-3.0.4/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
63431 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
63432 * in mm/page_alloc.c
63433 */
63434 extern void __free_pages_bootmem(struct page *page, unsigned int order);
63435 +extern void free_compound_page(struct page *page);
63436 extern void prep_compound_page(struct page *page, unsigned long order);
63437 #ifdef CONFIG_MEMORY_FAILURE
63438 extern bool is_free_buddy_page(struct page *page);
63439 diff -urNp linux-3.0.4/mm/Kconfig linux-3.0.4/mm/Kconfig
63440 --- linux-3.0.4/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
63441 +++ linux-3.0.4/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
63442 @@ -240,7 +240,7 @@ config KSM
63443 config DEFAULT_MMAP_MIN_ADDR
63444 int "Low address space to protect from user allocation"
63445 depends on MMU
63446 - default 4096
63447 + default 65536
63448 help
63449 This is the portion of low virtual memory which should be protected
63450 from userspace allocation. Keeping a user from writing to low pages
63451 diff -urNp linux-3.0.4/mm/kmemleak.c linux-3.0.4/mm/kmemleak.c
63452 --- linux-3.0.4/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
63453 +++ linux-3.0.4/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
63454 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
63455
63456 for (i = 0; i < object->trace_len; i++) {
63457 void *ptr = (void *)object->trace[i];
63458 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
63459 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
63460 }
63461 }
63462
63463 diff -urNp linux-3.0.4/mm/madvise.c linux-3.0.4/mm/madvise.c
63464 --- linux-3.0.4/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
63465 +++ linux-3.0.4/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
63466 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
63467 pgoff_t pgoff;
63468 unsigned long new_flags = vma->vm_flags;
63469
63470 +#ifdef CONFIG_PAX_SEGMEXEC
63471 + struct vm_area_struct *vma_m;
63472 +#endif
63473 +
63474 switch (behavior) {
63475 case MADV_NORMAL:
63476 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
63477 @@ -110,6 +114,13 @@ success:
63478 /*
63479 * vm_flags is protected by the mmap_sem held in write mode.
63480 */
63481 +
63482 +#ifdef CONFIG_PAX_SEGMEXEC
63483 + vma_m = pax_find_mirror_vma(vma);
63484 + if (vma_m)
63485 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
63486 +#endif
63487 +
63488 vma->vm_flags = new_flags;
63489
63490 out:
63491 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
63492 struct vm_area_struct ** prev,
63493 unsigned long start, unsigned long end)
63494 {
63495 +
63496 +#ifdef CONFIG_PAX_SEGMEXEC
63497 + struct vm_area_struct *vma_m;
63498 +#endif
63499 +
63500 *prev = vma;
63501 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
63502 return -EINVAL;
63503 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
63504 zap_page_range(vma, start, end - start, &details);
63505 } else
63506 zap_page_range(vma, start, end - start, NULL);
63507 +
63508 +#ifdef CONFIG_PAX_SEGMEXEC
63509 + vma_m = pax_find_mirror_vma(vma);
63510 + if (vma_m) {
63511 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
63512 + struct zap_details details = {
63513 + .nonlinear_vma = vma_m,
63514 + .last_index = ULONG_MAX,
63515 + };
63516 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
63517 + } else
63518 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
63519 + }
63520 +#endif
63521 +
63522 return 0;
63523 }
63524
63525 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
63526 if (end < start)
63527 goto out;
63528
63529 +#ifdef CONFIG_PAX_SEGMEXEC
63530 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63531 + if (end > SEGMEXEC_TASK_SIZE)
63532 + goto out;
63533 + } else
63534 +#endif
63535 +
63536 + if (end > TASK_SIZE)
63537 + goto out;
63538 +
63539 error = 0;
63540 if (end == start)
63541 goto out;
63542 diff -urNp linux-3.0.4/mm/memory.c linux-3.0.4/mm/memory.c
63543 --- linux-3.0.4/mm/memory.c 2011-08-23 21:44:40.000000000 -0400
63544 +++ linux-3.0.4/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
63545 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
63546 return;
63547
63548 pmd = pmd_offset(pud, start);
63549 +
63550 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
63551 pud_clear(pud);
63552 pmd_free_tlb(tlb, pmd, start);
63553 +#endif
63554 +
63555 }
63556
63557 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
63558 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct
63559 if (end - 1 > ceiling - 1)
63560 return;
63561
63562 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
63563 pud = pud_offset(pgd, start);
63564 pgd_clear(pgd);
63565 pud_free_tlb(tlb, pud, start);
63566 +#endif
63567 +
63568 }
63569
63570 /*
63571 @@ -1577,12 +1584,6 @@ no_page_table:
63572 return page;
63573 }
63574
63575 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
63576 -{
63577 - return stack_guard_page_start(vma, addr) ||
63578 - stack_guard_page_end(vma, addr+PAGE_SIZE);
63579 -}
63580 -
63581 /**
63582 * __get_user_pages() - pin user pages in memory
63583 * @tsk: task_struct of target task
63584 @@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
63585 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
63586 i = 0;
63587
63588 - do {
63589 + while (nr_pages) {
63590 struct vm_area_struct *vma;
63591
63592 - vma = find_extend_vma(mm, start);
63593 + vma = find_vma(mm, start);
63594 if (!vma && in_gate_area(mm, start)) {
63595 unsigned long pg = start & PAGE_MASK;
63596 pgd_t *pgd;
63597 @@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
63598 goto next_page;
63599 }
63600
63601 - if (!vma ||
63602 + if (!vma || start < vma->vm_start ||
63603 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
63604 !(vm_flags & vma->vm_flags))
63605 return i ? : -EFAULT;
63606 @@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
63607 int ret;
63608 unsigned int fault_flags = 0;
63609
63610 - /* For mlock, just skip the stack guard page. */
63611 - if (foll_flags & FOLL_MLOCK) {
63612 - if (stack_guard_page(vma, start))
63613 - goto next_page;
63614 - }
63615 if (foll_flags & FOLL_WRITE)
63616 fault_flags |= FAULT_FLAG_WRITE;
63617 if (nonblocking)
63618 @@ -1811,7 +1807,7 @@ next_page:
63619 start += PAGE_SIZE;
63620 nr_pages--;
63621 } while (nr_pages && start < vma->vm_end);
63622 - } while (nr_pages);
63623 + }
63624 return i;
63625 }
63626 EXPORT_SYMBOL(__get_user_pages);
63627 @@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
63628 page_add_file_rmap(page);
63629 set_pte_at(mm, addr, pte, mk_pte(page, prot));
63630
63631 +#ifdef CONFIG_PAX_SEGMEXEC
63632 + pax_mirror_file_pte(vma, addr, page, ptl);
63633 +#endif
63634 +
63635 retval = 0;
63636 pte_unmap_unlock(pte, ptl);
63637 return retval;
63638 @@ -2052,10 +2052,22 @@ out:
63639 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
63640 struct page *page)
63641 {
63642 +
63643 +#ifdef CONFIG_PAX_SEGMEXEC
63644 + struct vm_area_struct *vma_m;
63645 +#endif
63646 +
63647 if (addr < vma->vm_start || addr >= vma->vm_end)
63648 return -EFAULT;
63649 if (!page_count(page))
63650 return -EINVAL;
63651 +
63652 +#ifdef CONFIG_PAX_SEGMEXEC
63653 + vma_m = pax_find_mirror_vma(vma);
63654 + if (vma_m)
63655 + vma_m->vm_flags |= VM_INSERTPAGE;
63656 +#endif
63657 +
63658 vma->vm_flags |= VM_INSERTPAGE;
63659 return insert_page(vma, addr, page, vma->vm_page_prot);
63660 }
63661 @@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
63662 unsigned long pfn)
63663 {
63664 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
63665 + BUG_ON(vma->vm_mirror);
63666
63667 if (addr < vma->vm_start || addr >= vma->vm_end)
63668 return -EFAULT;
63669 @@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
63670 copy_user_highpage(dst, src, va, vma);
63671 }
63672
63673 +#ifdef CONFIG_PAX_SEGMEXEC
63674 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
63675 +{
63676 + struct mm_struct *mm = vma->vm_mm;
63677 + spinlock_t *ptl;
63678 + pte_t *pte, entry;
63679 +
63680 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
63681 + entry = *pte;
63682 + if (!pte_present(entry)) {
63683 + if (!pte_none(entry)) {
63684 + BUG_ON(pte_file(entry));
63685 + free_swap_and_cache(pte_to_swp_entry(entry));
63686 + pte_clear_not_present_full(mm, address, pte, 0);
63687 + }
63688 + } else {
63689 + struct page *page;
63690 +
63691 + flush_cache_page(vma, address, pte_pfn(entry));
63692 + entry = ptep_clear_flush(vma, address, pte);
63693 + BUG_ON(pte_dirty(entry));
63694 + page = vm_normal_page(vma, address, entry);
63695 + if (page) {
63696 + update_hiwater_rss(mm);
63697 + if (PageAnon(page))
63698 + dec_mm_counter_fast(mm, MM_ANONPAGES);
63699 + else
63700 + dec_mm_counter_fast(mm, MM_FILEPAGES);
63701 + page_remove_rmap(page);
63702 + page_cache_release(page);
63703 + }
63704 + }
63705 + pte_unmap_unlock(pte, ptl);
63706 +}
63707 +
63708 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
63709 + *
63710 + * the ptl of the lower mapped page is held on entry and is not released on exit
63711 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
63712 + */
63713 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63714 +{
63715 + struct mm_struct *mm = vma->vm_mm;
63716 + unsigned long address_m;
63717 + spinlock_t *ptl_m;
63718 + struct vm_area_struct *vma_m;
63719 + pmd_t *pmd_m;
63720 + pte_t *pte_m, entry_m;
63721 +
63722 + BUG_ON(!page_m || !PageAnon(page_m));
63723 +
63724 + vma_m = pax_find_mirror_vma(vma);
63725 + if (!vma_m)
63726 + return;
63727 +
63728 + BUG_ON(!PageLocked(page_m));
63729 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63730 + address_m = address + SEGMEXEC_TASK_SIZE;
63731 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63732 + pte_m = pte_offset_map(pmd_m, address_m);
63733 + ptl_m = pte_lockptr(mm, pmd_m);
63734 + if (ptl != ptl_m) {
63735 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63736 + if (!pte_none(*pte_m))
63737 + goto out;
63738 + }
63739 +
63740 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63741 + page_cache_get(page_m);
63742 + page_add_anon_rmap(page_m, vma_m, address_m);
63743 + inc_mm_counter_fast(mm, MM_ANONPAGES);
63744 + set_pte_at(mm, address_m, pte_m, entry_m);
63745 + update_mmu_cache(vma_m, address_m, entry_m);
63746 +out:
63747 + if (ptl != ptl_m)
63748 + spin_unlock(ptl_m);
63749 + pte_unmap(pte_m);
63750 + unlock_page(page_m);
63751 +}
63752 +
63753 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63754 +{
63755 + struct mm_struct *mm = vma->vm_mm;
63756 + unsigned long address_m;
63757 + spinlock_t *ptl_m;
63758 + struct vm_area_struct *vma_m;
63759 + pmd_t *pmd_m;
63760 + pte_t *pte_m, entry_m;
63761 +
63762 + BUG_ON(!page_m || PageAnon(page_m));
63763 +
63764 + vma_m = pax_find_mirror_vma(vma);
63765 + if (!vma_m)
63766 + return;
63767 +
63768 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63769 + address_m = address + SEGMEXEC_TASK_SIZE;
63770 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63771 + pte_m = pte_offset_map(pmd_m, address_m);
63772 + ptl_m = pte_lockptr(mm, pmd_m);
63773 + if (ptl != ptl_m) {
63774 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63775 + if (!pte_none(*pte_m))
63776 + goto out;
63777 + }
63778 +
63779 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63780 + page_cache_get(page_m);
63781 + page_add_file_rmap(page_m);
63782 + inc_mm_counter_fast(mm, MM_FILEPAGES);
63783 + set_pte_at(mm, address_m, pte_m, entry_m);
63784 + update_mmu_cache(vma_m, address_m, entry_m);
63785 +out:
63786 + if (ptl != ptl_m)
63787 + spin_unlock(ptl_m);
63788 + pte_unmap(pte_m);
63789 +}
63790 +
63791 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
63792 +{
63793 + struct mm_struct *mm = vma->vm_mm;
63794 + unsigned long address_m;
63795 + spinlock_t *ptl_m;
63796 + struct vm_area_struct *vma_m;
63797 + pmd_t *pmd_m;
63798 + pte_t *pte_m, entry_m;
63799 +
63800 + vma_m = pax_find_mirror_vma(vma);
63801 + if (!vma_m)
63802 + return;
63803 +
63804 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63805 + address_m = address + SEGMEXEC_TASK_SIZE;
63806 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63807 + pte_m = pte_offset_map(pmd_m, address_m);
63808 + ptl_m = pte_lockptr(mm, pmd_m);
63809 + if (ptl != ptl_m) {
63810 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63811 + if (!pte_none(*pte_m))
63812 + goto out;
63813 + }
63814 +
63815 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
63816 + set_pte_at(mm, address_m, pte_m, entry_m);
63817 +out:
63818 + if (ptl != ptl_m)
63819 + spin_unlock(ptl_m);
63820 + pte_unmap(pte_m);
63821 +}
63822 +
63823 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
63824 +{
63825 + struct page *page_m;
63826 + pte_t entry;
63827 +
63828 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
63829 + goto out;
63830 +
63831 + entry = *pte;
63832 + page_m = vm_normal_page(vma, address, entry);
63833 + if (!page_m)
63834 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
63835 + else if (PageAnon(page_m)) {
63836 + if (pax_find_mirror_vma(vma)) {
63837 + pte_unmap_unlock(pte, ptl);
63838 + lock_page(page_m);
63839 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
63840 + if (pte_same(entry, *pte))
63841 + pax_mirror_anon_pte(vma, address, page_m, ptl);
63842 + else
63843 + unlock_page(page_m);
63844 + }
63845 + } else
63846 + pax_mirror_file_pte(vma, address, page_m, ptl);
63847 +
63848 +out:
63849 + pte_unmap_unlock(pte, ptl);
63850 +}
63851 +#endif
63852 +
63853 /*
63854 * This routine handles present pages, when users try to write
63855 * to a shared page. It is done by copying the page to a new address
63856 @@ -2667,6 +2860,12 @@ gotten:
63857 */
63858 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63859 if (likely(pte_same(*page_table, orig_pte))) {
63860 +
63861 +#ifdef CONFIG_PAX_SEGMEXEC
63862 + if (pax_find_mirror_vma(vma))
63863 + BUG_ON(!trylock_page(new_page));
63864 +#endif
63865 +
63866 if (old_page) {
63867 if (!PageAnon(old_page)) {
63868 dec_mm_counter_fast(mm, MM_FILEPAGES);
63869 @@ -2718,6 +2917,10 @@ gotten:
63870 page_remove_rmap(old_page);
63871 }
63872
63873 +#ifdef CONFIG_PAX_SEGMEXEC
63874 + pax_mirror_anon_pte(vma, address, new_page, ptl);
63875 +#endif
63876 +
63877 /* Free the old page.. */
63878 new_page = old_page;
63879 ret |= VM_FAULT_WRITE;
63880 @@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
63881 swap_free(entry);
63882 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
63883 try_to_free_swap(page);
63884 +
63885 +#ifdef CONFIG_PAX_SEGMEXEC
63886 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
63887 +#endif
63888 +
63889 unlock_page(page);
63890 if (swapcache) {
63891 /*
63892 @@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
63893
63894 /* No need to invalidate - it was non-present before */
63895 update_mmu_cache(vma, address, page_table);
63896 +
63897 +#ifdef CONFIG_PAX_SEGMEXEC
63898 + pax_mirror_anon_pte(vma, address, page, ptl);
63899 +#endif
63900 +
63901 unlock:
63902 pte_unmap_unlock(page_table, ptl);
63903 out:
63904 @@ -3039,40 +3252,6 @@ out_release:
63905 }
63906
63907 /*
63908 - * This is like a special single-page "expand_{down|up}wards()",
63909 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
63910 - * doesn't hit another vma.
63911 - */
63912 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
63913 -{
63914 - address &= PAGE_MASK;
63915 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
63916 - struct vm_area_struct *prev = vma->vm_prev;
63917 -
63918 - /*
63919 - * Is there a mapping abutting this one below?
63920 - *
63921 - * That's only ok if it's the same stack mapping
63922 - * that has gotten split..
63923 - */
63924 - if (prev && prev->vm_end == address)
63925 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
63926 -
63927 - expand_downwards(vma, address - PAGE_SIZE);
63928 - }
63929 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
63930 - struct vm_area_struct *next = vma->vm_next;
63931 -
63932 - /* As VM_GROWSDOWN but s/below/above/ */
63933 - if (next && next->vm_start == address + PAGE_SIZE)
63934 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
63935 -
63936 - expand_upwards(vma, address + PAGE_SIZE);
63937 - }
63938 - return 0;
63939 -}
63940 -
63941 -/*
63942 * We enter with non-exclusive mmap_sem (to exclude vma changes,
63943 * but allow concurrent faults), and pte mapped but not yet locked.
63944 * We return with mmap_sem still held, but pte unmapped and unlocked.
63945 @@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
63946 unsigned long address, pte_t *page_table, pmd_t *pmd,
63947 unsigned int flags)
63948 {
63949 - struct page *page;
63950 + struct page *page = NULL;
63951 spinlock_t *ptl;
63952 pte_t entry;
63953
63954 - pte_unmap(page_table);
63955 -
63956 - /* Check if we need to add a guard page to the stack */
63957 - if (check_stack_guard_page(vma, address) < 0)
63958 - return VM_FAULT_SIGBUS;
63959 -
63960 - /* Use the zero-page for reads */
63961 if (!(flags & FAULT_FLAG_WRITE)) {
63962 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
63963 vma->vm_page_prot));
63964 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63965 + ptl = pte_lockptr(mm, pmd);
63966 + spin_lock(ptl);
63967 if (!pte_none(*page_table))
63968 goto unlock;
63969 goto setpte;
63970 }
63971
63972 /* Allocate our own private page. */
63973 + pte_unmap(page_table);
63974 +
63975 if (unlikely(anon_vma_prepare(vma)))
63976 goto oom;
63977 page = alloc_zeroed_user_highpage_movable(vma, address);
63978 @@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
63979 if (!pte_none(*page_table))
63980 goto release;
63981
63982 +#ifdef CONFIG_PAX_SEGMEXEC
63983 + if (pax_find_mirror_vma(vma))
63984 + BUG_ON(!trylock_page(page));
63985 +#endif
63986 +
63987 inc_mm_counter_fast(mm, MM_ANONPAGES);
63988 page_add_new_anon_rmap(page, vma, address);
63989 setpte:
63990 @@ -3127,6 +3307,12 @@ setpte:
63991
63992 /* No need to invalidate - it was non-present before */
63993 update_mmu_cache(vma, address, page_table);
63994 +
63995 +#ifdef CONFIG_PAX_SEGMEXEC
63996 + if (page)
63997 + pax_mirror_anon_pte(vma, address, page, ptl);
63998 +#endif
63999 +
64000 unlock:
64001 pte_unmap_unlock(page_table, ptl);
64002 return 0;
64003 @@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
64004 */
64005 /* Only go through if we didn't race with anybody else... */
64006 if (likely(pte_same(*page_table, orig_pte))) {
64007 +
64008 +#ifdef CONFIG_PAX_SEGMEXEC
64009 + if (anon && pax_find_mirror_vma(vma))
64010 + BUG_ON(!trylock_page(page));
64011 +#endif
64012 +
64013 flush_icache_page(vma, page);
64014 entry = mk_pte(page, vma->vm_page_prot);
64015 if (flags & FAULT_FLAG_WRITE)
64016 @@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
64017
64018 /* no need to invalidate: a not-present page won't be cached */
64019 update_mmu_cache(vma, address, page_table);
64020 +
64021 +#ifdef CONFIG_PAX_SEGMEXEC
64022 + if (anon)
64023 + pax_mirror_anon_pte(vma, address, page, ptl);
64024 + else
64025 + pax_mirror_file_pte(vma, address, page, ptl);
64026 +#endif
64027 +
64028 } else {
64029 if (charged)
64030 mem_cgroup_uncharge_page(page);
64031 @@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
64032 if (flags & FAULT_FLAG_WRITE)
64033 flush_tlb_fix_spurious_fault(vma, address);
64034 }
64035 +
64036 +#ifdef CONFIG_PAX_SEGMEXEC
64037 + pax_mirror_pte(vma, address, pte, pmd, ptl);
64038 + return 0;
64039 +#endif
64040 +
64041 unlock:
64042 pte_unmap_unlock(pte, ptl);
64043 return 0;
64044 @@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
64045 pmd_t *pmd;
64046 pte_t *pte;
64047
64048 +#ifdef CONFIG_PAX_SEGMEXEC
64049 + struct vm_area_struct *vma_m;
64050 +#endif
64051 +
64052 __set_current_state(TASK_RUNNING);
64053
64054 count_vm_event(PGFAULT);
64055 @@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
64056 if (unlikely(is_vm_hugetlb_page(vma)))
64057 return hugetlb_fault(mm, vma, address, flags);
64058
64059 +#ifdef CONFIG_PAX_SEGMEXEC
64060 + vma_m = pax_find_mirror_vma(vma);
64061 + if (vma_m) {
64062 + unsigned long address_m;
64063 + pgd_t *pgd_m;
64064 + pud_t *pud_m;
64065 + pmd_t *pmd_m;
64066 +
64067 + if (vma->vm_start > vma_m->vm_start) {
64068 + address_m = address;
64069 + address -= SEGMEXEC_TASK_SIZE;
64070 + vma = vma_m;
64071 + } else
64072 + address_m = address + SEGMEXEC_TASK_SIZE;
64073 +
64074 + pgd_m = pgd_offset(mm, address_m);
64075 + pud_m = pud_alloc(mm, pgd_m, address_m);
64076 + if (!pud_m)
64077 + return VM_FAULT_OOM;
64078 + pmd_m = pmd_alloc(mm, pud_m, address_m);
64079 + if (!pmd_m)
64080 + return VM_FAULT_OOM;
64081 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
64082 + return VM_FAULT_OOM;
64083 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
64084 + }
64085 +#endif
64086 +
64087 pgd = pgd_offset(mm, address);
64088 pud = pud_alloc(mm, pgd, address);
64089 if (!pud)
64090 @@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
64091 * run pte_offset_map on the pmd, if an huge pmd could
64092 * materialize from under us from a different thread.
64093 */
64094 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
64095 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
64096 return VM_FAULT_OOM;
64097 /* if an huge pmd materialized from under us just retry later */
64098 if (unlikely(pmd_trans_huge(*pmd)))
64099 @@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
64100 gate_vma.vm_start = FIXADDR_USER_START;
64101 gate_vma.vm_end = FIXADDR_USER_END;
64102 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
64103 - gate_vma.vm_page_prot = __P101;
64104 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
64105 /*
64106 * Make sure the vDSO gets into every core dump.
64107 * Dumping its contents makes post-mortem fully interpretable later
64108 diff -urNp linux-3.0.4/mm/memory-failure.c linux-3.0.4/mm/memory-failure.c
64109 --- linux-3.0.4/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
64110 +++ linux-3.0.4/mm/memory-failure.c 2011-08-23 21:47:56.000000000 -0400
64111 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
64112
64113 int sysctl_memory_failure_recovery __read_mostly = 1;
64114
64115 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64116 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64117
64118 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
64119
64120 @@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
64121 }
64122
64123 nr_pages = 1 << compound_trans_order(hpage);
64124 - atomic_long_add(nr_pages, &mce_bad_pages);
64125 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
64126
64127 /*
64128 * We need/can do nothing about count=0 pages.
64129 @@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
64130 if (!PageHWPoison(hpage)
64131 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
64132 || (p != hpage && TestSetPageHWPoison(hpage))) {
64133 - atomic_long_sub(nr_pages, &mce_bad_pages);
64134 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64135 return 0;
64136 }
64137 set_page_hwpoison_huge_page(hpage);
64138 @@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
64139 }
64140 if (hwpoison_filter(p)) {
64141 if (TestClearPageHWPoison(p))
64142 - atomic_long_sub(nr_pages, &mce_bad_pages);
64143 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64144 unlock_page(hpage);
64145 put_page(hpage);
64146 return 0;
64147 @@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
64148 return 0;
64149 }
64150 if (TestClearPageHWPoison(p))
64151 - atomic_long_sub(nr_pages, &mce_bad_pages);
64152 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64153 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
64154 return 0;
64155 }
64156 @@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
64157 */
64158 if (TestClearPageHWPoison(page)) {
64159 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
64160 - atomic_long_sub(nr_pages, &mce_bad_pages);
64161 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64162 freeit = 1;
64163 if (PageHuge(page))
64164 clear_page_hwpoison_huge_page(page);
64165 @@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
64166 }
64167 done:
64168 if (!PageHWPoison(hpage))
64169 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
64170 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
64171 set_page_hwpoison_huge_page(hpage);
64172 dequeue_hwpoisoned_huge_page(hpage);
64173 /* keep elevated page count for bad page */
64174 @@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
64175 return ret;
64176
64177 done:
64178 - atomic_long_add(1, &mce_bad_pages);
64179 + atomic_long_add_unchecked(1, &mce_bad_pages);
64180 SetPageHWPoison(page);
64181 /* keep elevated page count for bad page */
64182 return ret;
64183 diff -urNp linux-3.0.4/mm/mempolicy.c linux-3.0.4/mm/mempolicy.c
64184 --- linux-3.0.4/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
64185 +++ linux-3.0.4/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
64186 @@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
64187 unsigned long vmstart;
64188 unsigned long vmend;
64189
64190 +#ifdef CONFIG_PAX_SEGMEXEC
64191 + struct vm_area_struct *vma_m;
64192 +#endif
64193 +
64194 vma = find_vma_prev(mm, start, &prev);
64195 if (!vma || vma->vm_start > start)
64196 return -EFAULT;
64197 @@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
64198 err = policy_vma(vma, new_pol);
64199 if (err)
64200 goto out;
64201 +
64202 +#ifdef CONFIG_PAX_SEGMEXEC
64203 + vma_m = pax_find_mirror_vma(vma);
64204 + if (vma_m) {
64205 + err = policy_vma(vma_m, new_pol);
64206 + if (err)
64207 + goto out;
64208 + }
64209 +#endif
64210 +
64211 }
64212
64213 out:
64214 @@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
64215
64216 if (end < start)
64217 return -EINVAL;
64218 +
64219 +#ifdef CONFIG_PAX_SEGMEXEC
64220 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64221 + if (end > SEGMEXEC_TASK_SIZE)
64222 + return -EINVAL;
64223 + } else
64224 +#endif
64225 +
64226 + if (end > TASK_SIZE)
64227 + return -EINVAL;
64228 +
64229 if (end == start)
64230 return 0;
64231
64232 @@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64233 if (!mm)
64234 goto out;
64235
64236 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64237 + if (mm != current->mm &&
64238 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64239 + err = -EPERM;
64240 + goto out;
64241 + }
64242 +#endif
64243 +
64244 /*
64245 * Check if this process has the right to modify the specified
64246 * process. The right exists if the process has administrative
64247 @@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64248 rcu_read_lock();
64249 tcred = __task_cred(task);
64250 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64251 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64252 - !capable(CAP_SYS_NICE)) {
64253 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64254 rcu_read_unlock();
64255 err = -EPERM;
64256 goto out;
64257 diff -urNp linux-3.0.4/mm/migrate.c linux-3.0.4/mm/migrate.c
64258 --- linux-3.0.4/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
64259 +++ linux-3.0.4/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
64260 @@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
64261 unsigned long chunk_start;
64262 int err;
64263
64264 + pax_track_stack();
64265 +
64266 task_nodes = cpuset_mems_allowed(task);
64267
64268 err = -ENOMEM;
64269 @@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64270 if (!mm)
64271 return -EINVAL;
64272
64273 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64274 + if (mm != current->mm &&
64275 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64276 + err = -EPERM;
64277 + goto out;
64278 + }
64279 +#endif
64280 +
64281 /*
64282 * Check if this process has the right to modify the specified
64283 * process. The right exists if the process has administrative
64284 @@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64285 rcu_read_lock();
64286 tcred = __task_cred(task);
64287 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64288 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64289 - !capable(CAP_SYS_NICE)) {
64290 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64291 rcu_read_unlock();
64292 err = -EPERM;
64293 goto out;
64294 diff -urNp linux-3.0.4/mm/mlock.c linux-3.0.4/mm/mlock.c
64295 --- linux-3.0.4/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
64296 +++ linux-3.0.4/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
64297 @@ -13,6 +13,7 @@
64298 #include <linux/pagemap.h>
64299 #include <linux/mempolicy.h>
64300 #include <linux/syscalls.h>
64301 +#include <linux/security.h>
64302 #include <linux/sched.h>
64303 #include <linux/module.h>
64304 #include <linux/rmap.h>
64305 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
64306 return -EINVAL;
64307 if (end == start)
64308 return 0;
64309 + if (end > TASK_SIZE)
64310 + return -EINVAL;
64311 +
64312 vma = find_vma_prev(current->mm, start, &prev);
64313 if (!vma || vma->vm_start > start)
64314 return -ENOMEM;
64315 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
64316 for (nstart = start ; ; ) {
64317 vm_flags_t newflags;
64318
64319 +#ifdef CONFIG_PAX_SEGMEXEC
64320 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64321 + break;
64322 +#endif
64323 +
64324 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
64325
64326 newflags = vma->vm_flags | VM_LOCKED;
64327 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
64328 lock_limit >>= PAGE_SHIFT;
64329
64330 /* check against resource limits */
64331 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
64332 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
64333 error = do_mlock(start, len, 1);
64334 up_write(&current->mm->mmap_sem);
64335 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
64336 static int do_mlockall(int flags)
64337 {
64338 struct vm_area_struct * vma, * prev = NULL;
64339 - unsigned int def_flags = 0;
64340
64341 if (flags & MCL_FUTURE)
64342 - def_flags = VM_LOCKED;
64343 - current->mm->def_flags = def_flags;
64344 + current->mm->def_flags |= VM_LOCKED;
64345 + else
64346 + current->mm->def_flags &= ~VM_LOCKED;
64347 if (flags == MCL_FUTURE)
64348 goto out;
64349
64350 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
64351 vm_flags_t newflags;
64352
64353 +#ifdef CONFIG_PAX_SEGMEXEC
64354 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64355 + break;
64356 +#endif
64357 +
64358 + BUG_ON(vma->vm_end > TASK_SIZE);
64359 newflags = vma->vm_flags | VM_LOCKED;
64360 if (!(flags & MCL_CURRENT))
64361 newflags &= ~VM_LOCKED;
64362 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
64363 lock_limit >>= PAGE_SHIFT;
64364
64365 ret = -ENOMEM;
64366 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
64367 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
64368 capable(CAP_IPC_LOCK))
64369 ret = do_mlockall(flags);
64370 diff -urNp linux-3.0.4/mm/mmap.c linux-3.0.4/mm/mmap.c
64371 --- linux-3.0.4/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
64372 +++ linux-3.0.4/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
64373 @@ -46,6 +46,16 @@
64374 #define arch_rebalance_pgtables(addr, len) (addr)
64375 #endif
64376
64377 +static inline void verify_mm_writelocked(struct mm_struct *mm)
64378 +{
64379 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
64380 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
64381 + up_read(&mm->mmap_sem);
64382 + BUG();
64383 + }
64384 +#endif
64385 +}
64386 +
64387 static void unmap_region(struct mm_struct *mm,
64388 struct vm_area_struct *vma, struct vm_area_struct *prev,
64389 unsigned long start, unsigned long end);
64390 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
64391 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
64392 *
64393 */
64394 -pgprot_t protection_map[16] = {
64395 +pgprot_t protection_map[16] __read_only = {
64396 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
64397 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
64398 };
64399
64400 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
64401 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
64402 {
64403 - return __pgprot(pgprot_val(protection_map[vm_flags &
64404 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
64405 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
64406 pgprot_val(arch_vm_get_page_prot(vm_flags)));
64407 +
64408 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64409 + if (!(__supported_pte_mask & _PAGE_NX) &&
64410 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
64411 + (vm_flags & (VM_READ | VM_WRITE)))
64412 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
64413 +#endif
64414 +
64415 + return prot;
64416 }
64417 EXPORT_SYMBOL(vm_get_page_prot);
64418
64419 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
64420 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
64421 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
64422 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
64423 /*
64424 * Make sure vm_committed_as in one cacheline and not cacheline shared with
64425 * other variables. It can be updated by several CPUs frequently.
64426 @@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
64427 struct vm_area_struct *next = vma->vm_next;
64428
64429 might_sleep();
64430 + BUG_ON(vma->vm_mirror);
64431 if (vma->vm_ops && vma->vm_ops->close)
64432 vma->vm_ops->close(vma);
64433 if (vma->vm_file) {
64434 @@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
64435 * not page aligned -Ram Gupta
64436 */
64437 rlim = rlimit(RLIMIT_DATA);
64438 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
64439 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
64440 (mm->end_data - mm->start_data) > rlim)
64441 goto out;
64442 @@ -697,6 +719,12 @@ static int
64443 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
64444 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64445 {
64446 +
64447 +#ifdef CONFIG_PAX_SEGMEXEC
64448 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
64449 + return 0;
64450 +#endif
64451 +
64452 if (is_mergeable_vma(vma, file, vm_flags) &&
64453 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
64454 if (vma->vm_pgoff == vm_pgoff)
64455 @@ -716,6 +744,12 @@ static int
64456 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
64457 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64458 {
64459 +
64460 +#ifdef CONFIG_PAX_SEGMEXEC
64461 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
64462 + return 0;
64463 +#endif
64464 +
64465 if (is_mergeable_vma(vma, file, vm_flags) &&
64466 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
64467 pgoff_t vm_pglen;
64468 @@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
64469 struct vm_area_struct *vma_merge(struct mm_struct *mm,
64470 struct vm_area_struct *prev, unsigned long addr,
64471 unsigned long end, unsigned long vm_flags,
64472 - struct anon_vma *anon_vma, struct file *file,
64473 + struct anon_vma *anon_vma, struct file *file,
64474 pgoff_t pgoff, struct mempolicy *policy)
64475 {
64476 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
64477 struct vm_area_struct *area, *next;
64478 int err;
64479
64480 +#ifdef CONFIG_PAX_SEGMEXEC
64481 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
64482 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
64483 +
64484 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
64485 +#endif
64486 +
64487 /*
64488 * We later require that vma->vm_flags == vm_flags,
64489 * so this tests vma->vm_flags & VM_SPECIAL, too.
64490 @@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
64491 if (next && next->vm_end == end) /* cases 6, 7, 8 */
64492 next = next->vm_next;
64493
64494 +#ifdef CONFIG_PAX_SEGMEXEC
64495 + if (prev)
64496 + prev_m = pax_find_mirror_vma(prev);
64497 + if (area)
64498 + area_m = pax_find_mirror_vma(area);
64499 + if (next)
64500 + next_m = pax_find_mirror_vma(next);
64501 +#endif
64502 +
64503 /*
64504 * Can it merge with the predecessor?
64505 */
64506 @@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
64507 /* cases 1, 6 */
64508 err = vma_adjust(prev, prev->vm_start,
64509 next->vm_end, prev->vm_pgoff, NULL);
64510 - } else /* cases 2, 5, 7 */
64511 +
64512 +#ifdef CONFIG_PAX_SEGMEXEC
64513 + if (!err && prev_m)
64514 + err = vma_adjust(prev_m, prev_m->vm_start,
64515 + next_m->vm_end, prev_m->vm_pgoff, NULL);
64516 +#endif
64517 +
64518 + } else { /* cases 2, 5, 7 */
64519 err = vma_adjust(prev, prev->vm_start,
64520 end, prev->vm_pgoff, NULL);
64521 +
64522 +#ifdef CONFIG_PAX_SEGMEXEC
64523 + if (!err && prev_m)
64524 + err = vma_adjust(prev_m, prev_m->vm_start,
64525 + end_m, prev_m->vm_pgoff, NULL);
64526 +#endif
64527 +
64528 + }
64529 if (err)
64530 return NULL;
64531 khugepaged_enter_vma_merge(prev);
64532 @@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
64533 mpol_equal(policy, vma_policy(next)) &&
64534 can_vma_merge_before(next, vm_flags,
64535 anon_vma, file, pgoff+pglen)) {
64536 - if (prev && addr < prev->vm_end) /* case 4 */
64537 + if (prev && addr < prev->vm_end) { /* case 4 */
64538 err = vma_adjust(prev, prev->vm_start,
64539 addr, prev->vm_pgoff, NULL);
64540 - else /* cases 3, 8 */
64541 +
64542 +#ifdef CONFIG_PAX_SEGMEXEC
64543 + if (!err && prev_m)
64544 + err = vma_adjust(prev_m, prev_m->vm_start,
64545 + addr_m, prev_m->vm_pgoff, NULL);
64546 +#endif
64547 +
64548 + } else { /* cases 3, 8 */
64549 err = vma_adjust(area, addr, next->vm_end,
64550 next->vm_pgoff - pglen, NULL);
64551 +
64552 +#ifdef CONFIG_PAX_SEGMEXEC
64553 + if (!err && area_m)
64554 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
64555 + next_m->vm_pgoff - pglen, NULL);
64556 +#endif
64557 +
64558 + }
64559 if (err)
64560 return NULL;
64561 khugepaged_enter_vma_merge(area);
64562 @@ -929,14 +1009,11 @@ none:
64563 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
64564 struct file *file, long pages)
64565 {
64566 - const unsigned long stack_flags
64567 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
64568 -
64569 if (file) {
64570 mm->shared_vm += pages;
64571 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
64572 mm->exec_vm += pages;
64573 - } else if (flags & stack_flags)
64574 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
64575 mm->stack_vm += pages;
64576 if (flags & (VM_RESERVED|VM_IO))
64577 mm->reserved_vm += pages;
64578 @@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
64579 * (the exception is when the underlying filesystem is noexec
64580 * mounted, in which case we dont add PROT_EXEC.)
64581 */
64582 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
64583 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
64584 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
64585 prot |= PROT_EXEC;
64586
64587 @@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
64588 /* Obtain the address to map to. we verify (or select) it and ensure
64589 * that it represents a valid section of the address space.
64590 */
64591 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
64592 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
64593 if (addr & ~PAGE_MASK)
64594 return addr;
64595
64596 @@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
64597 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
64598 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
64599
64600 +#ifdef CONFIG_PAX_MPROTECT
64601 + if (mm->pax_flags & MF_PAX_MPROTECT) {
64602 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
64603 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
64604 + gr_log_rwxmmap(file);
64605 +
64606 +#ifdef CONFIG_PAX_EMUPLT
64607 + vm_flags &= ~VM_EXEC;
64608 +#else
64609 + return -EPERM;
64610 +#endif
64611 +
64612 + }
64613 +
64614 + if (!(vm_flags & VM_EXEC))
64615 + vm_flags &= ~VM_MAYEXEC;
64616 +#else
64617 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
64618 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
64619 +#endif
64620 + else
64621 + vm_flags &= ~VM_MAYWRITE;
64622 + }
64623 +#endif
64624 +
64625 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64626 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
64627 + vm_flags &= ~VM_PAGEEXEC;
64628 +#endif
64629 +
64630 if (flags & MAP_LOCKED)
64631 if (!can_do_mlock())
64632 return -EPERM;
64633 @@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
64634 locked += mm->locked_vm;
64635 lock_limit = rlimit(RLIMIT_MEMLOCK);
64636 lock_limit >>= PAGE_SHIFT;
64637 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64638 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
64639 return -EAGAIN;
64640 }
64641 @@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
64642 if (error)
64643 return error;
64644
64645 + if (!gr_acl_handle_mmap(file, prot))
64646 + return -EACCES;
64647 +
64648 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
64649 }
64650 EXPORT_SYMBOL(do_mmap_pgoff);
64651 @@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
64652 vm_flags_t vm_flags = vma->vm_flags;
64653
64654 /* If it was private or non-writable, the write bit is already clear */
64655 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
64656 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
64657 return 0;
64658
64659 /* The backer wishes to know when pages are first written to? */
64660 @@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
64661 unsigned long charged = 0;
64662 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
64663
64664 +#ifdef CONFIG_PAX_SEGMEXEC
64665 + struct vm_area_struct *vma_m = NULL;
64666 +#endif
64667 +
64668 + /*
64669 + * mm->mmap_sem is required to protect against another thread
64670 + * changing the mappings in case we sleep.
64671 + */
64672 + verify_mm_writelocked(mm);
64673 +
64674 /* Clear old maps */
64675 error = -ENOMEM;
64676 -munmap_back:
64677 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64678 if (vma && vma->vm_start < addr + len) {
64679 if (do_munmap(mm, addr, len))
64680 return -ENOMEM;
64681 - goto munmap_back;
64682 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64683 + BUG_ON(vma && vma->vm_start < addr + len);
64684 }
64685
64686 /* Check against address space limit. */
64687 @@ -1266,6 +1387,16 @@ munmap_back:
64688 goto unacct_error;
64689 }
64690
64691 +#ifdef CONFIG_PAX_SEGMEXEC
64692 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
64693 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
64694 + if (!vma_m) {
64695 + error = -ENOMEM;
64696 + goto free_vma;
64697 + }
64698 + }
64699 +#endif
64700 +
64701 vma->vm_mm = mm;
64702 vma->vm_start = addr;
64703 vma->vm_end = addr + len;
64704 @@ -1289,6 +1420,19 @@ munmap_back:
64705 error = file->f_op->mmap(file, vma);
64706 if (error)
64707 goto unmap_and_free_vma;
64708 +
64709 +#ifdef CONFIG_PAX_SEGMEXEC
64710 + if (vma_m && (vm_flags & VM_EXECUTABLE))
64711 + added_exe_file_vma(mm);
64712 +#endif
64713 +
64714 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64715 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
64716 + vma->vm_flags |= VM_PAGEEXEC;
64717 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
64718 + }
64719 +#endif
64720 +
64721 if (vm_flags & VM_EXECUTABLE)
64722 added_exe_file_vma(mm);
64723
64724 @@ -1324,6 +1468,11 @@ munmap_back:
64725 vma_link(mm, vma, prev, rb_link, rb_parent);
64726 file = vma->vm_file;
64727
64728 +#ifdef CONFIG_PAX_SEGMEXEC
64729 + if (vma_m)
64730 + BUG_ON(pax_mirror_vma(vma_m, vma));
64731 +#endif
64732 +
64733 /* Once vma denies write, undo our temporary denial count */
64734 if (correct_wcount)
64735 atomic_inc(&inode->i_writecount);
64736 @@ -1332,6 +1481,7 @@ out:
64737
64738 mm->total_vm += len >> PAGE_SHIFT;
64739 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
64740 + track_exec_limit(mm, addr, addr + len, vm_flags);
64741 if (vm_flags & VM_LOCKED) {
64742 if (!mlock_vma_pages_range(vma, addr, addr + len))
64743 mm->locked_vm += (len >> PAGE_SHIFT);
64744 @@ -1349,6 +1499,12 @@ unmap_and_free_vma:
64745 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
64746 charged = 0;
64747 free_vma:
64748 +
64749 +#ifdef CONFIG_PAX_SEGMEXEC
64750 + if (vma_m)
64751 + kmem_cache_free(vm_area_cachep, vma_m);
64752 +#endif
64753 +
64754 kmem_cache_free(vm_area_cachep, vma);
64755 unacct_error:
64756 if (charged)
64757 @@ -1356,6 +1512,44 @@ unacct_error:
64758 return error;
64759 }
64760
64761 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
64762 +{
64763 + if (!vma) {
64764 +#ifdef CONFIG_STACK_GROWSUP
64765 + if (addr > sysctl_heap_stack_gap)
64766 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
64767 + else
64768 + vma = find_vma(current->mm, 0);
64769 + if (vma && (vma->vm_flags & VM_GROWSUP))
64770 + return false;
64771 +#endif
64772 + return true;
64773 + }
64774 +
64775 + if (addr + len > vma->vm_start)
64776 + return false;
64777 +
64778 + if (vma->vm_flags & VM_GROWSDOWN)
64779 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
64780 +#ifdef CONFIG_STACK_GROWSUP
64781 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
64782 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
64783 +#endif
64784 +
64785 + return true;
64786 +}
64787 +
64788 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
64789 +{
64790 + if (vma->vm_start < len)
64791 + return -ENOMEM;
64792 + if (!(vma->vm_flags & VM_GROWSDOWN))
64793 + return vma->vm_start - len;
64794 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
64795 + return vma->vm_start - len - sysctl_heap_stack_gap;
64796 + return -ENOMEM;
64797 +}
64798 +
64799 /* Get an address range which is currently unmapped.
64800 * For shmat() with addr=0.
64801 *
64802 @@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
64803 if (flags & MAP_FIXED)
64804 return addr;
64805
64806 +#ifdef CONFIG_PAX_RANDMMAP
64807 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64808 +#endif
64809 +
64810 if (addr) {
64811 addr = PAGE_ALIGN(addr);
64812 - vma = find_vma(mm, addr);
64813 - if (TASK_SIZE - len >= addr &&
64814 - (!vma || addr + len <= vma->vm_start))
64815 - return addr;
64816 + if (TASK_SIZE - len >= addr) {
64817 + vma = find_vma(mm, addr);
64818 + if (check_heap_stack_gap(vma, addr, len))
64819 + return addr;
64820 + }
64821 }
64822 if (len > mm->cached_hole_size) {
64823 - start_addr = addr = mm->free_area_cache;
64824 + start_addr = addr = mm->free_area_cache;
64825 } else {
64826 - start_addr = addr = TASK_UNMAPPED_BASE;
64827 - mm->cached_hole_size = 0;
64828 + start_addr = addr = mm->mmap_base;
64829 + mm->cached_hole_size = 0;
64830 }
64831
64832 full_search:
64833 @@ -1404,34 +1603,40 @@ full_search:
64834 * Start a new search - just in case we missed
64835 * some holes.
64836 */
64837 - if (start_addr != TASK_UNMAPPED_BASE) {
64838 - addr = TASK_UNMAPPED_BASE;
64839 - start_addr = addr;
64840 + if (start_addr != mm->mmap_base) {
64841 + start_addr = addr = mm->mmap_base;
64842 mm->cached_hole_size = 0;
64843 goto full_search;
64844 }
64845 return -ENOMEM;
64846 }
64847 - if (!vma || addr + len <= vma->vm_start) {
64848 - /*
64849 - * Remember the place where we stopped the search:
64850 - */
64851 - mm->free_area_cache = addr + len;
64852 - return addr;
64853 - }
64854 + if (check_heap_stack_gap(vma, addr, len))
64855 + break;
64856 if (addr + mm->cached_hole_size < vma->vm_start)
64857 mm->cached_hole_size = vma->vm_start - addr;
64858 addr = vma->vm_end;
64859 }
64860 +
64861 + /*
64862 + * Remember the place where we stopped the search:
64863 + */
64864 + mm->free_area_cache = addr + len;
64865 + return addr;
64866 }
64867 #endif
64868
64869 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
64870 {
64871 +
64872 +#ifdef CONFIG_PAX_SEGMEXEC
64873 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64874 + return;
64875 +#endif
64876 +
64877 /*
64878 * Is this a new hole at the lowest possible address?
64879 */
64880 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
64881 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
64882 mm->free_area_cache = addr;
64883 mm->cached_hole_size = ~0UL;
64884 }
64885 @@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
64886 {
64887 struct vm_area_struct *vma;
64888 struct mm_struct *mm = current->mm;
64889 - unsigned long addr = addr0;
64890 + unsigned long base = mm->mmap_base, addr = addr0;
64891
64892 /* requested length too big for entire address space */
64893 if (len > TASK_SIZE)
64894 @@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
64895 if (flags & MAP_FIXED)
64896 return addr;
64897
64898 +#ifdef CONFIG_PAX_RANDMMAP
64899 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64900 +#endif
64901 +
64902 /* requesting a specific address */
64903 if (addr) {
64904 addr = PAGE_ALIGN(addr);
64905 - vma = find_vma(mm, addr);
64906 - if (TASK_SIZE - len >= addr &&
64907 - (!vma || addr + len <= vma->vm_start))
64908 - return addr;
64909 + if (TASK_SIZE - len >= addr) {
64910 + vma = find_vma(mm, addr);
64911 + if (check_heap_stack_gap(vma, addr, len))
64912 + return addr;
64913 + }
64914 }
64915
64916 /* check if free_area_cache is useful for us */
64917 @@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
64918 /* make sure it can fit in the remaining address space */
64919 if (addr > len) {
64920 vma = find_vma(mm, addr-len);
64921 - if (!vma || addr <= vma->vm_start)
64922 + if (check_heap_stack_gap(vma, addr - len, len))
64923 /* remember the address as a hint for next time */
64924 return (mm->free_area_cache = addr-len);
64925 }
64926 @@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
64927 * return with success:
64928 */
64929 vma = find_vma(mm, addr);
64930 - if (!vma || addr+len <= vma->vm_start)
64931 + if (check_heap_stack_gap(vma, addr, len))
64932 /* remember the address as a hint for next time */
64933 return (mm->free_area_cache = addr);
64934
64935 @@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
64936 mm->cached_hole_size = vma->vm_start - addr;
64937
64938 /* try just below the current vma->vm_start */
64939 - addr = vma->vm_start-len;
64940 - } while (len < vma->vm_start);
64941 + addr = skip_heap_stack_gap(vma, len);
64942 + } while (!IS_ERR_VALUE(addr));
64943
64944 bottomup:
64945 /*
64946 @@ -1515,13 +1725,21 @@ bottomup:
64947 * can happen with large stack limits and large mmap()
64948 * allocations.
64949 */
64950 + mm->mmap_base = TASK_UNMAPPED_BASE;
64951 +
64952 +#ifdef CONFIG_PAX_RANDMMAP
64953 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64954 + mm->mmap_base += mm->delta_mmap;
64955 +#endif
64956 +
64957 + mm->free_area_cache = mm->mmap_base;
64958 mm->cached_hole_size = ~0UL;
64959 - mm->free_area_cache = TASK_UNMAPPED_BASE;
64960 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
64961 /*
64962 * Restore the topdown base:
64963 */
64964 - mm->free_area_cache = mm->mmap_base;
64965 + mm->mmap_base = base;
64966 + mm->free_area_cache = base;
64967 mm->cached_hole_size = ~0UL;
64968
64969 return addr;
64970 @@ -1530,6 +1748,12 @@ bottomup:
64971
64972 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
64973 {
64974 +
64975 +#ifdef CONFIG_PAX_SEGMEXEC
64976 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64977 + return;
64978 +#endif
64979 +
64980 /*
64981 * Is this a new hole at the highest possible address?
64982 */
64983 @@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
64984 mm->free_area_cache = addr;
64985
64986 /* dont allow allocations above current base */
64987 - if (mm->free_area_cache > mm->mmap_base)
64988 + if (mm->free_area_cache > mm->mmap_base) {
64989 mm->free_area_cache = mm->mmap_base;
64990 + mm->cached_hole_size = ~0UL;
64991 + }
64992 }
64993
64994 unsigned long
64995 @@ -1646,6 +1872,28 @@ out:
64996 return prev ? prev->vm_next : vma;
64997 }
64998
64999 +#ifdef CONFIG_PAX_SEGMEXEC
65000 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
65001 +{
65002 + struct vm_area_struct *vma_m;
65003 +
65004 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
65005 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
65006 + BUG_ON(vma->vm_mirror);
65007 + return NULL;
65008 + }
65009 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
65010 + vma_m = vma->vm_mirror;
65011 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
65012 + BUG_ON(vma->vm_file != vma_m->vm_file);
65013 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
65014 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
65015 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
65016 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
65017 + return vma_m;
65018 +}
65019 +#endif
65020 +
65021 /*
65022 * Verify that the stack growth is acceptable and
65023 * update accounting. This is shared with both the
65024 @@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
65025 return -ENOMEM;
65026
65027 /* Stack limit test */
65028 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
65029 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
65030 return -ENOMEM;
65031
65032 @@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
65033 locked = mm->locked_vm + grow;
65034 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
65035 limit >>= PAGE_SHIFT;
65036 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65037 if (locked > limit && !capable(CAP_IPC_LOCK))
65038 return -ENOMEM;
65039 }
65040 @@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
65041 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
65042 * vma is the last one with address > vma->vm_end. Have to extend vma.
65043 */
65044 +#ifndef CONFIG_IA64
65045 +static
65046 +#endif
65047 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
65048 {
65049 int error;
65050 + bool locknext;
65051
65052 if (!(vma->vm_flags & VM_GROWSUP))
65053 return -EFAULT;
65054
65055 + /* Also guard against wrapping around to address 0. */
65056 + if (address < PAGE_ALIGN(address+1))
65057 + address = PAGE_ALIGN(address+1);
65058 + else
65059 + return -ENOMEM;
65060 +
65061 /*
65062 * We must make sure the anon_vma is allocated
65063 * so that the anon_vma locking is not a noop.
65064 */
65065 if (unlikely(anon_vma_prepare(vma)))
65066 return -ENOMEM;
65067 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
65068 + if (locknext && anon_vma_prepare(vma->vm_next))
65069 + return -ENOMEM;
65070 vma_lock_anon_vma(vma);
65071 + if (locknext)
65072 + vma_lock_anon_vma(vma->vm_next);
65073
65074 /*
65075 * vma->vm_start/vm_end cannot change under us because the caller
65076 * is required to hold the mmap_sem in read mode. We need the
65077 - * anon_vma lock to serialize against concurrent expand_stacks.
65078 - * Also guard against wrapping around to address 0.
65079 + * anon_vma locks to serialize against concurrent expand_stacks
65080 + * and expand_upwards.
65081 */
65082 - if (address < PAGE_ALIGN(address+4))
65083 - address = PAGE_ALIGN(address+4);
65084 - else {
65085 - vma_unlock_anon_vma(vma);
65086 - return -ENOMEM;
65087 - }
65088 error = 0;
65089
65090 /* Somebody else might have raced and expanded it already */
65091 - if (address > vma->vm_end) {
65092 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
65093 + error = -ENOMEM;
65094 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
65095 unsigned long size, grow;
65096
65097 size = address - vma->vm_start;
65098 @@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
65099 }
65100 }
65101 }
65102 + if (locknext)
65103 + vma_unlock_anon_vma(vma->vm_next);
65104 vma_unlock_anon_vma(vma);
65105 khugepaged_enter_vma_merge(vma);
65106 return error;
65107 @@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
65108 unsigned long address)
65109 {
65110 int error;
65111 + bool lockprev = false;
65112 + struct vm_area_struct *prev;
65113
65114 /*
65115 * We must make sure the anon_vma is allocated
65116 @@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
65117 if (error)
65118 return error;
65119
65120 + prev = vma->vm_prev;
65121 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
65122 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
65123 +#endif
65124 + if (lockprev && anon_vma_prepare(prev))
65125 + return -ENOMEM;
65126 + if (lockprev)
65127 + vma_lock_anon_vma(prev);
65128 +
65129 vma_lock_anon_vma(vma);
65130
65131 /*
65132 @@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
65133 */
65134
65135 /* Somebody else might have raced and expanded it already */
65136 - if (address < vma->vm_start) {
65137 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
65138 + error = -ENOMEM;
65139 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
65140 unsigned long size, grow;
65141
65142 +#ifdef CONFIG_PAX_SEGMEXEC
65143 + struct vm_area_struct *vma_m;
65144 +
65145 + vma_m = pax_find_mirror_vma(vma);
65146 +#endif
65147 +
65148 size = vma->vm_end - address;
65149 grow = (vma->vm_start - address) >> PAGE_SHIFT;
65150
65151 @@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
65152 if (!error) {
65153 vma->vm_start = address;
65154 vma->vm_pgoff -= grow;
65155 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
65156 +
65157 +#ifdef CONFIG_PAX_SEGMEXEC
65158 + if (vma_m) {
65159 + vma_m->vm_start -= grow << PAGE_SHIFT;
65160 + vma_m->vm_pgoff -= grow;
65161 + }
65162 +#endif
65163 +
65164 perf_event_mmap(vma);
65165 }
65166 }
65167 }
65168 vma_unlock_anon_vma(vma);
65169 + if (lockprev)
65170 + vma_unlock_anon_vma(prev);
65171 khugepaged_enter_vma_merge(vma);
65172 return error;
65173 }
65174 @@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
65175 do {
65176 long nrpages = vma_pages(vma);
65177
65178 +#ifdef CONFIG_PAX_SEGMEXEC
65179 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
65180 + vma = remove_vma(vma);
65181 + continue;
65182 + }
65183 +#endif
65184 +
65185 mm->total_vm -= nrpages;
65186 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
65187 vma = remove_vma(vma);
65188 @@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
65189 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
65190 vma->vm_prev = NULL;
65191 do {
65192 +
65193 +#ifdef CONFIG_PAX_SEGMEXEC
65194 + if (vma->vm_mirror) {
65195 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
65196 + vma->vm_mirror->vm_mirror = NULL;
65197 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
65198 + vma->vm_mirror = NULL;
65199 + }
65200 +#endif
65201 +
65202 rb_erase(&vma->vm_rb, &mm->mm_rb);
65203 mm->map_count--;
65204 tail_vma = vma;
65205 @@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
65206 struct vm_area_struct *new;
65207 int err = -ENOMEM;
65208
65209 +#ifdef CONFIG_PAX_SEGMEXEC
65210 + struct vm_area_struct *vma_m, *new_m = NULL;
65211 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
65212 +#endif
65213 +
65214 if (is_vm_hugetlb_page(vma) && (addr &
65215 ~(huge_page_mask(hstate_vma(vma)))))
65216 return -EINVAL;
65217
65218 +#ifdef CONFIG_PAX_SEGMEXEC
65219 + vma_m = pax_find_mirror_vma(vma);
65220 +#endif
65221 +
65222 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65223 if (!new)
65224 goto out_err;
65225
65226 +#ifdef CONFIG_PAX_SEGMEXEC
65227 + if (vma_m) {
65228 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65229 + if (!new_m) {
65230 + kmem_cache_free(vm_area_cachep, new);
65231 + goto out_err;
65232 + }
65233 + }
65234 +#endif
65235 +
65236 /* most fields are the same, copy all, and then fixup */
65237 *new = *vma;
65238
65239 @@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
65240 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65241 }
65242
65243 +#ifdef CONFIG_PAX_SEGMEXEC
65244 + if (vma_m) {
65245 + *new_m = *vma_m;
65246 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
65247 + new_m->vm_mirror = new;
65248 + new->vm_mirror = new_m;
65249 +
65250 + if (new_below)
65251 + new_m->vm_end = addr_m;
65252 + else {
65253 + new_m->vm_start = addr_m;
65254 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
65255 + }
65256 + }
65257 +#endif
65258 +
65259 pol = mpol_dup(vma_policy(vma));
65260 if (IS_ERR(pol)) {
65261 err = PTR_ERR(pol);
65262 @@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
65263 else
65264 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
65265
65266 +#ifdef CONFIG_PAX_SEGMEXEC
65267 + if (!err && vma_m) {
65268 + if (anon_vma_clone(new_m, vma_m))
65269 + goto out_free_mpol;
65270 +
65271 + mpol_get(pol);
65272 + vma_set_policy(new_m, pol);
65273 +
65274 + if (new_m->vm_file) {
65275 + get_file(new_m->vm_file);
65276 + if (vma_m->vm_flags & VM_EXECUTABLE)
65277 + added_exe_file_vma(mm);
65278 + }
65279 +
65280 + if (new_m->vm_ops && new_m->vm_ops->open)
65281 + new_m->vm_ops->open(new_m);
65282 +
65283 + if (new_below)
65284 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
65285 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
65286 + else
65287 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
65288 +
65289 + if (err) {
65290 + if (new_m->vm_ops && new_m->vm_ops->close)
65291 + new_m->vm_ops->close(new_m);
65292 + if (new_m->vm_file) {
65293 + if (vma_m->vm_flags & VM_EXECUTABLE)
65294 + removed_exe_file_vma(mm);
65295 + fput(new_m->vm_file);
65296 + }
65297 + mpol_put(pol);
65298 + }
65299 + }
65300 +#endif
65301 +
65302 /* Success. */
65303 if (!err)
65304 return 0;
65305 @@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
65306 removed_exe_file_vma(mm);
65307 fput(new->vm_file);
65308 }
65309 - unlink_anon_vmas(new);
65310 out_free_mpol:
65311 mpol_put(pol);
65312 out_free_vma:
65313 +
65314 +#ifdef CONFIG_PAX_SEGMEXEC
65315 + if (new_m) {
65316 + unlink_anon_vmas(new_m);
65317 + kmem_cache_free(vm_area_cachep, new_m);
65318 + }
65319 +#endif
65320 +
65321 + unlink_anon_vmas(new);
65322 kmem_cache_free(vm_area_cachep, new);
65323 out_err:
65324 return err;
65325 @@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
65326 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
65327 unsigned long addr, int new_below)
65328 {
65329 +
65330 +#ifdef CONFIG_PAX_SEGMEXEC
65331 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
65332 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
65333 + if (mm->map_count >= sysctl_max_map_count-1)
65334 + return -ENOMEM;
65335 + } else
65336 +#endif
65337 +
65338 if (mm->map_count >= sysctl_max_map_count)
65339 return -ENOMEM;
65340
65341 @@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
65342 * work. This now handles partial unmappings.
65343 * Jeremy Fitzhardinge <jeremy@goop.org>
65344 */
65345 +#ifdef CONFIG_PAX_SEGMEXEC
65346 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65347 {
65348 + int ret = __do_munmap(mm, start, len);
65349 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
65350 + return ret;
65351 +
65352 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
65353 +}
65354 +
65355 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65356 +#else
65357 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65358 +#endif
65359 +{
65360 unsigned long end;
65361 struct vm_area_struct *vma, *prev, *last;
65362
65363 + /*
65364 + * mm->mmap_sem is required to protect against another thread
65365 + * changing the mappings in case we sleep.
65366 + */
65367 + verify_mm_writelocked(mm);
65368 +
65369 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
65370 return -EINVAL;
65371
65372 @@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
65373 /* Fix up all other VM information */
65374 remove_vma_list(mm, vma);
65375
65376 + track_exec_limit(mm, start, end, 0UL);
65377 +
65378 return 0;
65379 }
65380
65381 @@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
65382
65383 profile_munmap(addr);
65384
65385 +#ifdef CONFIG_PAX_SEGMEXEC
65386 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
65387 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
65388 + return -EINVAL;
65389 +#endif
65390 +
65391 down_write(&mm->mmap_sem);
65392 ret = do_munmap(mm, addr, len);
65393 up_write(&mm->mmap_sem);
65394 return ret;
65395 }
65396
65397 -static inline void verify_mm_writelocked(struct mm_struct *mm)
65398 -{
65399 -#ifdef CONFIG_DEBUG_VM
65400 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65401 - WARN_ON(1);
65402 - up_read(&mm->mmap_sem);
65403 - }
65404 -#endif
65405 -}
65406 -
65407 /*
65408 * this is really a simplified "do_mmap". it only handles
65409 * anonymous maps. eventually we may be able to do some
65410 @@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
65411 struct rb_node ** rb_link, * rb_parent;
65412 pgoff_t pgoff = addr >> PAGE_SHIFT;
65413 int error;
65414 + unsigned long charged;
65415
65416 len = PAGE_ALIGN(len);
65417 if (!len)
65418 @@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
65419
65420 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
65421
65422 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65423 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65424 + flags &= ~VM_EXEC;
65425 +
65426 +#ifdef CONFIG_PAX_MPROTECT
65427 + if (mm->pax_flags & MF_PAX_MPROTECT)
65428 + flags &= ~VM_MAYEXEC;
65429 +#endif
65430 +
65431 + }
65432 +#endif
65433 +
65434 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
65435 if (error & ~PAGE_MASK)
65436 return error;
65437
65438 + charged = len >> PAGE_SHIFT;
65439 +
65440 /*
65441 * mlock MCL_FUTURE?
65442 */
65443 if (mm->def_flags & VM_LOCKED) {
65444 unsigned long locked, lock_limit;
65445 - locked = len >> PAGE_SHIFT;
65446 + locked = charged;
65447 locked += mm->locked_vm;
65448 lock_limit = rlimit(RLIMIT_MEMLOCK);
65449 lock_limit >>= PAGE_SHIFT;
65450 @@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
65451 /*
65452 * Clear old maps. this also does some error checking for us
65453 */
65454 - munmap_back:
65455 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65456 if (vma && vma->vm_start < addr + len) {
65457 if (do_munmap(mm, addr, len))
65458 return -ENOMEM;
65459 - goto munmap_back;
65460 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65461 + BUG_ON(vma && vma->vm_start < addr + len);
65462 }
65463
65464 /* Check against address space limits *after* clearing old maps... */
65465 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
65466 + if (!may_expand_vm(mm, charged))
65467 return -ENOMEM;
65468
65469 if (mm->map_count > sysctl_max_map_count)
65470 return -ENOMEM;
65471
65472 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
65473 + if (security_vm_enough_memory(charged))
65474 return -ENOMEM;
65475
65476 /* Can we just expand an old private anonymous mapping? */
65477 @@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
65478 */
65479 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65480 if (!vma) {
65481 - vm_unacct_memory(len >> PAGE_SHIFT);
65482 + vm_unacct_memory(charged);
65483 return -ENOMEM;
65484 }
65485
65486 @@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
65487 vma_link(mm, vma, prev, rb_link, rb_parent);
65488 out:
65489 perf_event_mmap(vma);
65490 - mm->total_vm += len >> PAGE_SHIFT;
65491 + mm->total_vm += charged;
65492 if (flags & VM_LOCKED) {
65493 if (!mlock_vma_pages_range(vma, addr, addr + len))
65494 - mm->locked_vm += (len >> PAGE_SHIFT);
65495 + mm->locked_vm += charged;
65496 }
65497 + track_exec_limit(mm, addr, addr + len, flags);
65498 return addr;
65499 }
65500
65501 @@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
65502 * Walk the list again, actually closing and freeing it,
65503 * with preemption enabled, without holding any MM locks.
65504 */
65505 - while (vma)
65506 + while (vma) {
65507 + vma->vm_mirror = NULL;
65508 vma = remove_vma(vma);
65509 + }
65510
65511 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
65512 }
65513 @@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
65514 struct vm_area_struct * __vma, * prev;
65515 struct rb_node ** rb_link, * rb_parent;
65516
65517 +#ifdef CONFIG_PAX_SEGMEXEC
65518 + struct vm_area_struct *vma_m = NULL;
65519 +#endif
65520 +
65521 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
65522 + return -EPERM;
65523 +
65524 /*
65525 * The vm_pgoff of a purely anonymous vma should be irrelevant
65526 * until its first write fault, when page's anon_vma and index
65527 @@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
65528 if ((vma->vm_flags & VM_ACCOUNT) &&
65529 security_vm_enough_memory_mm(mm, vma_pages(vma)))
65530 return -ENOMEM;
65531 +
65532 +#ifdef CONFIG_PAX_SEGMEXEC
65533 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
65534 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65535 + if (!vma_m)
65536 + return -ENOMEM;
65537 + }
65538 +#endif
65539 +
65540 vma_link(mm, vma, prev, rb_link, rb_parent);
65541 +
65542 +#ifdef CONFIG_PAX_SEGMEXEC
65543 + if (vma_m)
65544 + BUG_ON(pax_mirror_vma(vma_m, vma));
65545 +#endif
65546 +
65547 return 0;
65548 }
65549
65550 @@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
65551 struct rb_node **rb_link, *rb_parent;
65552 struct mempolicy *pol;
65553
65554 + BUG_ON(vma->vm_mirror);
65555 +
65556 /*
65557 * If anonymous vma has not yet been faulted, update new pgoff
65558 * to match new location, to increase its chance of merging.
65559 @@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
65560 return NULL;
65561 }
65562
65563 +#ifdef CONFIG_PAX_SEGMEXEC
65564 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
65565 +{
65566 + struct vm_area_struct *prev_m;
65567 + struct rb_node **rb_link_m, *rb_parent_m;
65568 + struct mempolicy *pol_m;
65569 +
65570 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
65571 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
65572 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
65573 + *vma_m = *vma;
65574 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
65575 + if (anon_vma_clone(vma_m, vma))
65576 + return -ENOMEM;
65577 + pol_m = vma_policy(vma_m);
65578 + mpol_get(pol_m);
65579 + vma_set_policy(vma_m, pol_m);
65580 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
65581 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
65582 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
65583 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
65584 + if (vma_m->vm_file)
65585 + get_file(vma_m->vm_file);
65586 + if (vma_m->vm_ops && vma_m->vm_ops->open)
65587 + vma_m->vm_ops->open(vma_m);
65588 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
65589 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
65590 + vma_m->vm_mirror = vma;
65591 + vma->vm_mirror = vma_m;
65592 + return 0;
65593 +}
65594 +#endif
65595 +
65596 /*
65597 * Return true if the calling process may expand its vm space by the passed
65598 * number of pages
65599 @@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
65600 unsigned long lim;
65601
65602 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
65603 -
65604 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
65605 if (cur + npages > lim)
65606 return 0;
65607 return 1;
65608 @@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
65609 vma->vm_start = addr;
65610 vma->vm_end = addr + len;
65611
65612 +#ifdef CONFIG_PAX_MPROTECT
65613 + if (mm->pax_flags & MF_PAX_MPROTECT) {
65614 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
65615 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
65616 + return -EPERM;
65617 + if (!(vm_flags & VM_EXEC))
65618 + vm_flags &= ~VM_MAYEXEC;
65619 +#else
65620 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65621 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65622 +#endif
65623 + else
65624 + vm_flags &= ~VM_MAYWRITE;
65625 + }
65626 +#endif
65627 +
65628 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
65629 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65630
65631 diff -urNp linux-3.0.4/mm/mprotect.c linux-3.0.4/mm/mprotect.c
65632 --- linux-3.0.4/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
65633 +++ linux-3.0.4/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
65634 @@ -23,10 +23,16 @@
65635 #include <linux/mmu_notifier.h>
65636 #include <linux/migrate.h>
65637 #include <linux/perf_event.h>
65638 +
65639 +#ifdef CONFIG_PAX_MPROTECT
65640 +#include <linux/elf.h>
65641 +#endif
65642 +
65643 #include <asm/uaccess.h>
65644 #include <asm/pgtable.h>
65645 #include <asm/cacheflush.h>
65646 #include <asm/tlbflush.h>
65647 +#include <asm/mmu_context.h>
65648
65649 #ifndef pgprot_modify
65650 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
65651 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
65652 flush_tlb_range(vma, start, end);
65653 }
65654
65655 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65656 +/* called while holding the mmap semaphor for writing except stack expansion */
65657 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
65658 +{
65659 + unsigned long oldlimit, newlimit = 0UL;
65660 +
65661 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
65662 + return;
65663 +
65664 + spin_lock(&mm->page_table_lock);
65665 + oldlimit = mm->context.user_cs_limit;
65666 + if ((prot & VM_EXEC) && oldlimit < end)
65667 + /* USER_CS limit moved up */
65668 + newlimit = end;
65669 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
65670 + /* USER_CS limit moved down */
65671 + newlimit = start;
65672 +
65673 + if (newlimit) {
65674 + mm->context.user_cs_limit = newlimit;
65675 +
65676 +#ifdef CONFIG_SMP
65677 + wmb();
65678 + cpus_clear(mm->context.cpu_user_cs_mask);
65679 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
65680 +#endif
65681 +
65682 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
65683 + }
65684 + spin_unlock(&mm->page_table_lock);
65685 + if (newlimit == end) {
65686 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
65687 +
65688 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
65689 + if (is_vm_hugetlb_page(vma))
65690 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
65691 + else
65692 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
65693 + }
65694 +}
65695 +#endif
65696 +
65697 int
65698 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
65699 unsigned long start, unsigned long end, unsigned long newflags)
65700 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
65701 int error;
65702 int dirty_accountable = 0;
65703
65704 +#ifdef CONFIG_PAX_SEGMEXEC
65705 + struct vm_area_struct *vma_m = NULL;
65706 + unsigned long start_m, end_m;
65707 +
65708 + start_m = start + SEGMEXEC_TASK_SIZE;
65709 + end_m = end + SEGMEXEC_TASK_SIZE;
65710 +#endif
65711 +
65712 if (newflags == oldflags) {
65713 *pprev = vma;
65714 return 0;
65715 }
65716
65717 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
65718 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
65719 +
65720 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
65721 + return -ENOMEM;
65722 +
65723 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
65724 + return -ENOMEM;
65725 + }
65726 +
65727 /*
65728 * If we make a private mapping writable we increase our commit;
65729 * but (without finer accounting) cannot reduce our commit if we
65730 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
65731 }
65732 }
65733
65734 +#ifdef CONFIG_PAX_SEGMEXEC
65735 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
65736 + if (start != vma->vm_start) {
65737 + error = split_vma(mm, vma, start, 1);
65738 + if (error)
65739 + goto fail;
65740 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
65741 + *pprev = (*pprev)->vm_next;
65742 + }
65743 +
65744 + if (end != vma->vm_end) {
65745 + error = split_vma(mm, vma, end, 0);
65746 + if (error)
65747 + goto fail;
65748 + }
65749 +
65750 + if (pax_find_mirror_vma(vma)) {
65751 + error = __do_munmap(mm, start_m, end_m - start_m);
65752 + if (error)
65753 + goto fail;
65754 + } else {
65755 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65756 + if (!vma_m) {
65757 + error = -ENOMEM;
65758 + goto fail;
65759 + }
65760 + vma->vm_flags = newflags;
65761 + error = pax_mirror_vma(vma_m, vma);
65762 + if (error) {
65763 + vma->vm_flags = oldflags;
65764 + goto fail;
65765 + }
65766 + }
65767 + }
65768 +#endif
65769 +
65770 /*
65771 * First try to merge with previous and/or next vma.
65772 */
65773 @@ -204,9 +306,21 @@ success:
65774 * vm_flags and vm_page_prot are protected by the mmap_sem
65775 * held in write mode.
65776 */
65777 +
65778 +#ifdef CONFIG_PAX_SEGMEXEC
65779 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
65780 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
65781 +#endif
65782 +
65783 vma->vm_flags = newflags;
65784 +
65785 +#ifdef CONFIG_PAX_MPROTECT
65786 + if (mm->binfmt && mm->binfmt->handle_mprotect)
65787 + mm->binfmt->handle_mprotect(vma, newflags);
65788 +#endif
65789 +
65790 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
65791 - vm_get_page_prot(newflags));
65792 + vm_get_page_prot(vma->vm_flags));
65793
65794 if (vma_wants_writenotify(vma)) {
65795 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
65796 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65797 end = start + len;
65798 if (end <= start)
65799 return -ENOMEM;
65800 +
65801 +#ifdef CONFIG_PAX_SEGMEXEC
65802 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65803 + if (end > SEGMEXEC_TASK_SIZE)
65804 + return -EINVAL;
65805 + } else
65806 +#endif
65807 +
65808 + if (end > TASK_SIZE)
65809 + return -EINVAL;
65810 +
65811 if (!arch_validate_prot(prot))
65812 return -EINVAL;
65813
65814 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65815 /*
65816 * Does the application expect PROT_READ to imply PROT_EXEC:
65817 */
65818 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65819 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65820 prot |= PROT_EXEC;
65821
65822 vm_flags = calc_vm_prot_bits(prot);
65823 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65824 if (start > vma->vm_start)
65825 prev = vma;
65826
65827 +#ifdef CONFIG_PAX_MPROTECT
65828 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
65829 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
65830 +#endif
65831 +
65832 for (nstart = start ; ; ) {
65833 unsigned long newflags;
65834
65835 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65836
65837 /* newflags >> 4 shift VM_MAY% in place of VM_% */
65838 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
65839 + if (prot & (PROT_WRITE | PROT_EXEC))
65840 + gr_log_rwxmprotect(vma->vm_file);
65841 +
65842 + error = -EACCES;
65843 + goto out;
65844 + }
65845 +
65846 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
65847 error = -EACCES;
65848 goto out;
65849 }
65850 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65851 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
65852 if (error)
65853 goto out;
65854 +
65855 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
65856 +
65857 nstart = tmp;
65858
65859 if (nstart < prev->vm_end)
65860 diff -urNp linux-3.0.4/mm/mremap.c linux-3.0.4/mm/mremap.c
65861 --- linux-3.0.4/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
65862 +++ linux-3.0.4/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
65863 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
65864 continue;
65865 pte = ptep_clear_flush(vma, old_addr, old_pte);
65866 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
65867 +
65868 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65869 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
65870 + pte = pte_exprotect(pte);
65871 +#endif
65872 +
65873 set_pte_at(mm, new_addr, new_pte, pte);
65874 }
65875
65876 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
65877 if (is_vm_hugetlb_page(vma))
65878 goto Einval;
65879
65880 +#ifdef CONFIG_PAX_SEGMEXEC
65881 + if (pax_find_mirror_vma(vma))
65882 + goto Einval;
65883 +#endif
65884 +
65885 /* We can't remap across vm area boundaries */
65886 if (old_len > vma->vm_end - addr)
65887 goto Efault;
65888 @@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
65889 unsigned long ret = -EINVAL;
65890 unsigned long charged = 0;
65891 unsigned long map_flags;
65892 + unsigned long pax_task_size = TASK_SIZE;
65893
65894 if (new_addr & ~PAGE_MASK)
65895 goto out;
65896
65897 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
65898 +#ifdef CONFIG_PAX_SEGMEXEC
65899 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65900 + pax_task_size = SEGMEXEC_TASK_SIZE;
65901 +#endif
65902 +
65903 + pax_task_size -= PAGE_SIZE;
65904 +
65905 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
65906 goto out;
65907
65908 /* Check if the location we're moving into overlaps the
65909 * old location at all, and fail if it does.
65910 */
65911 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
65912 - goto out;
65913 -
65914 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
65915 + if (addr + old_len > new_addr && new_addr + new_len > addr)
65916 goto out;
65917
65918 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65919 @@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
65920 struct vm_area_struct *vma;
65921 unsigned long ret = -EINVAL;
65922 unsigned long charged = 0;
65923 + unsigned long pax_task_size = TASK_SIZE;
65924
65925 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
65926 goto out;
65927 @@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
65928 if (!new_len)
65929 goto out;
65930
65931 +#ifdef CONFIG_PAX_SEGMEXEC
65932 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65933 + pax_task_size = SEGMEXEC_TASK_SIZE;
65934 +#endif
65935 +
65936 + pax_task_size -= PAGE_SIZE;
65937 +
65938 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
65939 + old_len > pax_task_size || addr > pax_task_size-old_len)
65940 + goto out;
65941 +
65942 if (flags & MREMAP_FIXED) {
65943 if (flags & MREMAP_MAYMOVE)
65944 ret = mremap_to(addr, old_len, new_addr, new_len);
65945 @@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
65946 addr + new_len);
65947 }
65948 ret = addr;
65949 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
65950 goto out;
65951 }
65952 }
65953 @@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
65954 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65955 if (ret)
65956 goto out;
65957 +
65958 + map_flags = vma->vm_flags;
65959 ret = move_vma(vma, addr, old_len, new_len, new_addr);
65960 + if (!(ret & ~PAGE_MASK)) {
65961 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
65962 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
65963 + }
65964 }
65965 out:
65966 if (ret & ~PAGE_MASK)
65967 diff -urNp linux-3.0.4/mm/nobootmem.c linux-3.0.4/mm/nobootmem.c
65968 --- linux-3.0.4/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
65969 +++ linux-3.0.4/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
65970 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
65971 unsigned long __init free_all_memory_core_early(int nodeid)
65972 {
65973 int i;
65974 - u64 start, end;
65975 + u64 start, end, startrange, endrange;
65976 unsigned long count = 0;
65977 - struct range *range = NULL;
65978 + struct range *range = NULL, rangerange = { 0, 0 };
65979 int nr_range;
65980
65981 nr_range = get_free_all_memory_range(&range, nodeid);
65982 + startrange = __pa(range) >> PAGE_SHIFT;
65983 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
65984
65985 for (i = 0; i < nr_range; i++) {
65986 start = range[i].start;
65987 end = range[i].end;
65988 + if (start <= endrange && startrange < end) {
65989 + BUG_ON(rangerange.start | rangerange.end);
65990 + rangerange = range[i];
65991 + continue;
65992 + }
65993 count += end - start;
65994 __free_pages_memory(start, end);
65995 }
65996 + start = rangerange.start;
65997 + end = rangerange.end;
65998 + count += end - start;
65999 + __free_pages_memory(start, end);
66000
66001 return count;
66002 }
66003 diff -urNp linux-3.0.4/mm/nommu.c linux-3.0.4/mm/nommu.c
66004 --- linux-3.0.4/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
66005 +++ linux-3.0.4/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
66006 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
66007 int sysctl_overcommit_ratio = 50; /* default is 50% */
66008 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
66009 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
66010 -int heap_stack_gap = 0;
66011
66012 atomic_long_t mmap_pages_allocated;
66013
66014 @@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
66015 EXPORT_SYMBOL(find_vma);
66016
66017 /*
66018 - * find a VMA
66019 - * - we don't extend stack VMAs under NOMMU conditions
66020 - */
66021 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
66022 -{
66023 - return find_vma(mm, addr);
66024 -}
66025 -
66026 -/*
66027 * expand a stack to a given address
66028 * - not supported under NOMMU conditions
66029 */
66030 @@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
66031
66032 /* most fields are the same, copy all, and then fixup */
66033 *new = *vma;
66034 + INIT_LIST_HEAD(&new->anon_vma_chain);
66035 *region = *vma->vm_region;
66036 new->vm_region = region;
66037
66038 diff -urNp linux-3.0.4/mm/page_alloc.c linux-3.0.4/mm/page_alloc.c
66039 --- linux-3.0.4/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
66040 +++ linux-3.0.4/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
66041 @@ -340,7 +340,7 @@ out:
66042 * This usage means that zero-order pages may not be compound.
66043 */
66044
66045 -static void free_compound_page(struct page *page)
66046 +void free_compound_page(struct page *page)
66047 {
66048 __free_pages_ok(page, compound_order(page));
66049 }
66050 @@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
66051 int i;
66052 int bad = 0;
66053
66054 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66055 + unsigned long index = 1UL << order;
66056 +#endif
66057 +
66058 trace_mm_page_free_direct(page, order);
66059 kmemcheck_free_shadow(page, order);
66060
66061 @@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
66062 debug_check_no_obj_freed(page_address(page),
66063 PAGE_SIZE << order);
66064 }
66065 +
66066 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66067 + for (; index; --index)
66068 + sanitize_highpage(page + index - 1);
66069 +#endif
66070 +
66071 arch_free_page(page, order);
66072 kernel_map_pages(page, 1 << order, 0);
66073
66074 @@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
66075 arch_alloc_page(page, order);
66076 kernel_map_pages(page, 1 << order, 1);
66077
66078 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
66079 if (gfp_flags & __GFP_ZERO)
66080 prep_zero_page(page, order, gfp_flags);
66081 +#endif
66082
66083 if (order && (gfp_flags & __GFP_COMP))
66084 prep_compound_page(page, order);
66085 @@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
66086 int cpu;
66087 struct zone *zone;
66088
66089 + pax_track_stack();
66090 +
66091 for_each_populated_zone(zone) {
66092 if (skip_free_areas_node(filter, zone_to_nid(zone)))
66093 continue;
66094 diff -urNp linux-3.0.4/mm/percpu.c linux-3.0.4/mm/percpu.c
66095 --- linux-3.0.4/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
66096 +++ linux-3.0.4/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
66097 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
66098 static unsigned int pcpu_last_unit_cpu __read_mostly;
66099
66100 /* the address of the first chunk which starts with the kernel static area */
66101 -void *pcpu_base_addr __read_mostly;
66102 +void *pcpu_base_addr __read_only;
66103 EXPORT_SYMBOL_GPL(pcpu_base_addr);
66104
66105 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
66106 diff -urNp linux-3.0.4/mm/rmap.c linux-3.0.4/mm/rmap.c
66107 --- linux-3.0.4/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
66108 +++ linux-3.0.4/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
66109 @@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
66110 struct anon_vma *anon_vma = vma->anon_vma;
66111 struct anon_vma_chain *avc;
66112
66113 +#ifdef CONFIG_PAX_SEGMEXEC
66114 + struct anon_vma_chain *avc_m = NULL;
66115 +#endif
66116 +
66117 might_sleep();
66118 if (unlikely(!anon_vma)) {
66119 struct mm_struct *mm = vma->vm_mm;
66120 @@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
66121 if (!avc)
66122 goto out_enomem;
66123
66124 +#ifdef CONFIG_PAX_SEGMEXEC
66125 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
66126 + if (!avc_m)
66127 + goto out_enomem_free_avc;
66128 +#endif
66129 +
66130 anon_vma = find_mergeable_anon_vma(vma);
66131 allocated = NULL;
66132 if (!anon_vma) {
66133 @@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
66134 /* page_table_lock to protect against threads */
66135 spin_lock(&mm->page_table_lock);
66136 if (likely(!vma->anon_vma)) {
66137 +
66138 +#ifdef CONFIG_PAX_SEGMEXEC
66139 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
66140 +
66141 + if (vma_m) {
66142 + BUG_ON(vma_m->anon_vma);
66143 + vma_m->anon_vma = anon_vma;
66144 + avc_m->anon_vma = anon_vma;
66145 + avc_m->vma = vma;
66146 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
66147 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
66148 + avc_m = NULL;
66149 + }
66150 +#endif
66151 +
66152 vma->anon_vma = anon_vma;
66153 avc->anon_vma = anon_vma;
66154 avc->vma = vma;
66155 @@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
66156
66157 if (unlikely(allocated))
66158 put_anon_vma(allocated);
66159 +
66160 +#ifdef CONFIG_PAX_SEGMEXEC
66161 + if (unlikely(avc_m))
66162 + anon_vma_chain_free(avc_m);
66163 +#endif
66164 +
66165 if (unlikely(avc))
66166 anon_vma_chain_free(avc);
66167 }
66168 return 0;
66169
66170 out_enomem_free_avc:
66171 +
66172 +#ifdef CONFIG_PAX_SEGMEXEC
66173 + if (avc_m)
66174 + anon_vma_chain_free(avc_m);
66175 +#endif
66176 +
66177 anon_vma_chain_free(avc);
66178 out_enomem:
66179 return -ENOMEM;
66180 @@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
66181 * Attach the anon_vmas from src to dst.
66182 * Returns 0 on success, -ENOMEM on failure.
66183 */
66184 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
66185 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
66186 {
66187 struct anon_vma_chain *avc, *pavc;
66188 struct anon_vma *root = NULL;
66189 @@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
66190 * the corresponding VMA in the parent process is attached to.
66191 * Returns 0 on success, non-zero on failure.
66192 */
66193 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
66194 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
66195 {
66196 struct anon_vma_chain *avc;
66197 struct anon_vma *anon_vma;
66198 diff -urNp linux-3.0.4/mm/shmem.c linux-3.0.4/mm/shmem.c
66199 --- linux-3.0.4/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
66200 +++ linux-3.0.4/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
66201 @@ -31,7 +31,7 @@
66202 #include <linux/percpu_counter.h>
66203 #include <linux/swap.h>
66204
66205 -static struct vfsmount *shm_mnt;
66206 +struct vfsmount *shm_mnt;
66207
66208 #ifdef CONFIG_SHMEM
66209 /*
66210 @@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
66211 goto unlock;
66212 }
66213 entry = shmem_swp_entry(info, index, NULL);
66214 + if (!entry)
66215 + goto unlock;
66216 if (entry->val) {
66217 /*
66218 * The more uptodate page coming down from a stacked
66219 @@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
66220 struct vm_area_struct pvma;
66221 struct page *page;
66222
66223 + pax_track_stack();
66224 +
66225 spol = mpol_cond_copy(&mpol,
66226 mpol_shared_policy_lookup(&info->policy, idx));
66227
66228 @@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
66229 int err = -ENOMEM;
66230
66231 /* Round up to L1_CACHE_BYTES to resist false sharing */
66232 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
66233 - L1_CACHE_BYTES), GFP_KERNEL);
66234 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
66235 if (!sbinfo)
66236 return -ENOMEM;
66237
66238 diff -urNp linux-3.0.4/mm/slab.c linux-3.0.4/mm/slab.c
66239 --- linux-3.0.4/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
66240 +++ linux-3.0.4/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
66241 @@ -151,7 +151,7 @@
66242
66243 /* Legal flag mask for kmem_cache_create(). */
66244 #if DEBUG
66245 -# define CREATE_MASK (SLAB_RED_ZONE | \
66246 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
66247 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
66248 SLAB_CACHE_DMA | \
66249 SLAB_STORE_USER | \
66250 @@ -159,7 +159,7 @@
66251 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66252 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
66253 #else
66254 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
66255 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
66256 SLAB_CACHE_DMA | \
66257 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
66258 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66259 @@ -288,7 +288,7 @@ struct kmem_list3 {
66260 * Need this for bootstrapping a per node allocator.
66261 */
66262 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
66263 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
66264 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
66265 #define CACHE_CACHE 0
66266 #define SIZE_AC MAX_NUMNODES
66267 #define SIZE_L3 (2 * MAX_NUMNODES)
66268 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
66269 if ((x)->max_freeable < i) \
66270 (x)->max_freeable = i; \
66271 } while (0)
66272 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
66273 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
66274 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
66275 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
66276 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
66277 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
66278 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
66279 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
66280 #else
66281 #define STATS_INC_ACTIVE(x) do { } while (0)
66282 #define STATS_DEC_ACTIVE(x) do { } while (0)
66283 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
66284 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
66285 */
66286 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
66287 - const struct slab *slab, void *obj)
66288 + const struct slab *slab, const void *obj)
66289 {
66290 u32 offset = (obj - slab->s_mem);
66291 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
66292 @@ -564,7 +564,7 @@ struct cache_names {
66293 static struct cache_names __initdata cache_names[] = {
66294 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
66295 #include <linux/kmalloc_sizes.h>
66296 - {NULL,}
66297 + {NULL}
66298 #undef CACHE
66299 };
66300
66301 @@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
66302 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
66303 sizes[INDEX_AC].cs_size,
66304 ARCH_KMALLOC_MINALIGN,
66305 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66306 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66307 NULL);
66308
66309 if (INDEX_AC != INDEX_L3) {
66310 @@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
66311 kmem_cache_create(names[INDEX_L3].name,
66312 sizes[INDEX_L3].cs_size,
66313 ARCH_KMALLOC_MINALIGN,
66314 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66315 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66316 NULL);
66317 }
66318
66319 @@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
66320 sizes->cs_cachep = kmem_cache_create(names->name,
66321 sizes->cs_size,
66322 ARCH_KMALLOC_MINALIGN,
66323 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66324 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66325 NULL);
66326 }
66327 #ifdef CONFIG_ZONE_DMA
66328 @@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
66329 }
66330 /* cpu stats */
66331 {
66332 - unsigned long allochit = atomic_read(&cachep->allochit);
66333 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
66334 - unsigned long freehit = atomic_read(&cachep->freehit);
66335 - unsigned long freemiss = atomic_read(&cachep->freemiss);
66336 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
66337 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
66338 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
66339 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
66340
66341 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
66342 allochit, allocmiss, freehit, freemiss);
66343 @@ -4532,15 +4532,66 @@ static const struct file_operations proc
66344
66345 static int __init slab_proc_init(void)
66346 {
66347 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
66348 + mode_t gr_mode = S_IRUGO;
66349 +
66350 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66351 + gr_mode = S_IRUSR;
66352 +#endif
66353 +
66354 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
66355 #ifdef CONFIG_DEBUG_SLAB_LEAK
66356 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
66357 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
66358 #endif
66359 return 0;
66360 }
66361 module_init(slab_proc_init);
66362 #endif
66363
66364 +void check_object_size(const void *ptr, unsigned long n, bool to)
66365 +{
66366 +
66367 +#ifdef CONFIG_PAX_USERCOPY
66368 + struct page *page;
66369 + struct kmem_cache *cachep = NULL;
66370 + struct slab *slabp;
66371 + unsigned int objnr;
66372 + unsigned long offset;
66373 +
66374 + if (!n)
66375 + return;
66376 +
66377 + if (ZERO_OR_NULL_PTR(ptr))
66378 + goto report;
66379 +
66380 + if (!virt_addr_valid(ptr))
66381 + return;
66382 +
66383 + page = virt_to_head_page(ptr);
66384 +
66385 + if (!PageSlab(page)) {
66386 + if (object_is_on_stack(ptr, n) == -1)
66387 + goto report;
66388 + return;
66389 + }
66390 +
66391 + cachep = page_get_cache(page);
66392 + if (!(cachep->flags & SLAB_USERCOPY))
66393 + goto report;
66394 +
66395 + slabp = page_get_slab(page);
66396 + objnr = obj_to_index(cachep, slabp, ptr);
66397 + BUG_ON(objnr >= cachep->num);
66398 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
66399 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
66400 + return;
66401 +
66402 +report:
66403 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
66404 +#endif
66405 +
66406 +}
66407 +EXPORT_SYMBOL(check_object_size);
66408 +
66409 /**
66410 * ksize - get the actual amount of memory allocated for a given object
66411 * @objp: Pointer to the object
66412 diff -urNp linux-3.0.4/mm/slob.c linux-3.0.4/mm/slob.c
66413 --- linux-3.0.4/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
66414 +++ linux-3.0.4/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
66415 @@ -29,7 +29,7 @@
66416 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
66417 * alloc_pages() directly, allocating compound pages so the page order
66418 * does not have to be separately tracked, and also stores the exact
66419 - * allocation size in page->private so that it can be used to accurately
66420 + * allocation size in slob_page->size so that it can be used to accurately
66421 * provide ksize(). These objects are detected in kfree() because slob_page()
66422 * is false for them.
66423 *
66424 @@ -58,6 +58,7 @@
66425 */
66426
66427 #include <linux/kernel.h>
66428 +#include <linux/sched.h>
66429 #include <linux/slab.h>
66430 #include <linux/mm.h>
66431 #include <linux/swap.h> /* struct reclaim_state */
66432 @@ -102,7 +103,8 @@ struct slob_page {
66433 unsigned long flags; /* mandatory */
66434 atomic_t _count; /* mandatory */
66435 slobidx_t units; /* free units left in page */
66436 - unsigned long pad[2];
66437 + unsigned long pad[1];
66438 + unsigned long size; /* size when >=PAGE_SIZE */
66439 slob_t *free; /* first free slob_t in page */
66440 struct list_head list; /* linked list of free pages */
66441 };
66442 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
66443 */
66444 static inline int is_slob_page(struct slob_page *sp)
66445 {
66446 - return PageSlab((struct page *)sp);
66447 + return PageSlab((struct page *)sp) && !sp->size;
66448 }
66449
66450 static inline void set_slob_page(struct slob_page *sp)
66451 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
66452
66453 static inline struct slob_page *slob_page(const void *addr)
66454 {
66455 - return (struct slob_page *)virt_to_page(addr);
66456 + return (struct slob_page *)virt_to_head_page(addr);
66457 }
66458
66459 /*
66460 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
66461 /*
66462 * Return the size of a slob block.
66463 */
66464 -static slobidx_t slob_units(slob_t *s)
66465 +static slobidx_t slob_units(const slob_t *s)
66466 {
66467 if (s->units > 0)
66468 return s->units;
66469 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
66470 /*
66471 * Return the next free slob block pointer after this one.
66472 */
66473 -static slob_t *slob_next(slob_t *s)
66474 +static slob_t *slob_next(const slob_t *s)
66475 {
66476 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
66477 slobidx_t next;
66478 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
66479 /*
66480 * Returns true if s is the last free block in its page.
66481 */
66482 -static int slob_last(slob_t *s)
66483 +static int slob_last(const slob_t *s)
66484 {
66485 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
66486 }
66487 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
66488 if (!page)
66489 return NULL;
66490
66491 + set_slob_page(page);
66492 return page_address(page);
66493 }
66494
66495 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
66496 if (!b)
66497 return NULL;
66498 sp = slob_page(b);
66499 - set_slob_page(sp);
66500
66501 spin_lock_irqsave(&slob_lock, flags);
66502 sp->units = SLOB_UNITS(PAGE_SIZE);
66503 sp->free = b;
66504 + sp->size = 0;
66505 INIT_LIST_HEAD(&sp->list);
66506 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
66507 set_slob_page_free(sp, slob_list);
66508 @@ -476,10 +479,9 @@ out:
66509 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
66510 */
66511
66512 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66513 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
66514 {
66515 - unsigned int *m;
66516 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66517 + slob_t *m;
66518 void *ret;
66519
66520 lockdep_trace_alloc(gfp);
66521 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
66522
66523 if (!m)
66524 return NULL;
66525 - *m = size;
66526 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
66527 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
66528 + m[0].units = size;
66529 + m[1].units = align;
66530 ret = (void *)m + align;
66531
66532 trace_kmalloc_node(_RET_IP_, ret,
66533 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
66534 gfp |= __GFP_COMP;
66535 ret = slob_new_pages(gfp, order, node);
66536 if (ret) {
66537 - struct page *page;
66538 - page = virt_to_page(ret);
66539 - page->private = size;
66540 + struct slob_page *sp;
66541 + sp = slob_page(ret);
66542 + sp->size = size;
66543 }
66544
66545 trace_kmalloc_node(_RET_IP_, ret,
66546 size, PAGE_SIZE << order, gfp, node);
66547 }
66548
66549 - kmemleak_alloc(ret, size, 1, gfp);
66550 + return ret;
66551 +}
66552 +
66553 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66554 +{
66555 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66556 + void *ret = __kmalloc_node_align(size, gfp, node, align);
66557 +
66558 + if (!ZERO_OR_NULL_PTR(ret))
66559 + kmemleak_alloc(ret, size, 1, gfp);
66560 return ret;
66561 }
66562 EXPORT_SYMBOL(__kmalloc_node);
66563 @@ -531,13 +545,88 @@ void kfree(const void *block)
66564 sp = slob_page(block);
66565 if (is_slob_page(sp)) {
66566 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66567 - unsigned int *m = (unsigned int *)(block - align);
66568 - slob_free(m, *m + align);
66569 - } else
66570 + slob_t *m = (slob_t *)(block - align);
66571 + slob_free(m, m[0].units + align);
66572 + } else {
66573 + clear_slob_page(sp);
66574 + free_slob_page(sp);
66575 + sp->size = 0;
66576 put_page(&sp->page);
66577 + }
66578 }
66579 EXPORT_SYMBOL(kfree);
66580
66581 +void check_object_size(const void *ptr, unsigned long n, bool to)
66582 +{
66583 +
66584 +#ifdef CONFIG_PAX_USERCOPY
66585 + struct slob_page *sp;
66586 + const slob_t *free;
66587 + const void *base;
66588 + unsigned long flags;
66589 +
66590 + if (!n)
66591 + return;
66592 +
66593 + if (ZERO_OR_NULL_PTR(ptr))
66594 + goto report;
66595 +
66596 + if (!virt_addr_valid(ptr))
66597 + return;
66598 +
66599 + sp = slob_page(ptr);
66600 + if (!PageSlab((struct page*)sp)) {
66601 + if (object_is_on_stack(ptr, n) == -1)
66602 + goto report;
66603 + return;
66604 + }
66605 +
66606 + if (sp->size) {
66607 + base = page_address(&sp->page);
66608 + if (base <= ptr && n <= sp->size - (ptr - base))
66609 + return;
66610 + goto report;
66611 + }
66612 +
66613 + /* some tricky double walking to find the chunk */
66614 + spin_lock_irqsave(&slob_lock, flags);
66615 + base = (void *)((unsigned long)ptr & PAGE_MASK);
66616 + free = sp->free;
66617 +
66618 + while (!slob_last(free) && (void *)free <= ptr) {
66619 + base = free + slob_units(free);
66620 + free = slob_next(free);
66621 + }
66622 +
66623 + while (base < (void *)free) {
66624 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
66625 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
66626 + int offset;
66627 +
66628 + if (ptr < base + align)
66629 + break;
66630 +
66631 + offset = ptr - base - align;
66632 + if (offset >= m) {
66633 + base += size;
66634 + continue;
66635 + }
66636 +
66637 + if (n > m - offset)
66638 + break;
66639 +
66640 + spin_unlock_irqrestore(&slob_lock, flags);
66641 + return;
66642 + }
66643 +
66644 + spin_unlock_irqrestore(&slob_lock, flags);
66645 +report:
66646 + pax_report_usercopy(ptr, n, to, NULL);
66647 +#endif
66648 +
66649 +}
66650 +EXPORT_SYMBOL(check_object_size);
66651 +
66652 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
66653 size_t ksize(const void *block)
66654 {
66655 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
66656 sp = slob_page(block);
66657 if (is_slob_page(sp)) {
66658 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66659 - unsigned int *m = (unsigned int *)(block - align);
66660 - return SLOB_UNITS(*m) * SLOB_UNIT;
66661 + slob_t *m = (slob_t *)(block - align);
66662 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
66663 } else
66664 - return sp->page.private;
66665 + return sp->size;
66666 }
66667 EXPORT_SYMBOL(ksize);
66668
66669 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
66670 {
66671 struct kmem_cache *c;
66672
66673 +#ifdef CONFIG_PAX_USERCOPY
66674 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
66675 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
66676 +#else
66677 c = slob_alloc(sizeof(struct kmem_cache),
66678 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
66679 +#endif
66680
66681 if (c) {
66682 c->name = name;
66683 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
66684 {
66685 void *b;
66686
66687 +#ifdef CONFIG_PAX_USERCOPY
66688 + b = __kmalloc_node_align(c->size, flags, node, c->align);
66689 +#else
66690 if (c->size < PAGE_SIZE) {
66691 b = slob_alloc(c->size, flags, c->align, node);
66692 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66693 SLOB_UNITS(c->size) * SLOB_UNIT,
66694 flags, node);
66695 } else {
66696 + struct slob_page *sp;
66697 +
66698 b = slob_new_pages(flags, get_order(c->size), node);
66699 + sp = slob_page(b);
66700 + sp->size = c->size;
66701 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66702 PAGE_SIZE << get_order(c->size),
66703 flags, node);
66704 }
66705 +#endif
66706
66707 if (c->ctor)
66708 c->ctor(b);
66709 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
66710
66711 static void __kmem_cache_free(void *b, int size)
66712 {
66713 - if (size < PAGE_SIZE)
66714 + struct slob_page *sp = slob_page(b);
66715 +
66716 + if (is_slob_page(sp))
66717 slob_free(b, size);
66718 - else
66719 + else {
66720 + clear_slob_page(sp);
66721 + free_slob_page(sp);
66722 + sp->size = 0;
66723 slob_free_pages(b, get_order(size));
66724 + }
66725 }
66726
66727 static void kmem_rcu_free(struct rcu_head *head)
66728 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
66729
66730 void kmem_cache_free(struct kmem_cache *c, void *b)
66731 {
66732 + int size = c->size;
66733 +
66734 +#ifdef CONFIG_PAX_USERCOPY
66735 + if (size + c->align < PAGE_SIZE) {
66736 + size += c->align;
66737 + b -= c->align;
66738 + }
66739 +#endif
66740 +
66741 kmemleak_free_recursive(b, c->flags);
66742 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
66743 struct slob_rcu *slob_rcu;
66744 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
66745 - slob_rcu->size = c->size;
66746 + slob_rcu = b + (size - sizeof(struct slob_rcu));
66747 + slob_rcu->size = size;
66748 call_rcu(&slob_rcu->head, kmem_rcu_free);
66749 } else {
66750 - __kmem_cache_free(b, c->size);
66751 + __kmem_cache_free(b, size);
66752 }
66753
66754 +#ifdef CONFIG_PAX_USERCOPY
66755 + trace_kfree(_RET_IP_, b);
66756 +#else
66757 trace_kmem_cache_free(_RET_IP_, b);
66758 +#endif
66759 +
66760 }
66761 EXPORT_SYMBOL(kmem_cache_free);
66762
66763 diff -urNp linux-3.0.4/mm/slub.c linux-3.0.4/mm/slub.c
66764 --- linux-3.0.4/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
66765 +++ linux-3.0.4/mm/slub.c 2011-08-23 21:48:14.000000000 -0400
66766 @@ -442,7 +442,7 @@ static void print_track(const char *s, s
66767 if (!t->addr)
66768 return;
66769
66770 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
66771 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
66772 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
66773 }
66774
66775 @@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
66776
66777 page = virt_to_head_page(x);
66778
66779 + BUG_ON(!PageSlab(page));
66780 +
66781 slab_free(s, page, x, _RET_IP_);
66782
66783 trace_kmem_cache_free(_RET_IP_, x);
66784 @@ -2170,7 +2172,7 @@ static int slub_min_objects;
66785 * Merge control. If this is set then no merging of slab caches will occur.
66786 * (Could be removed. This was introduced to pacify the merge skeptics.)
66787 */
66788 -static int slub_nomerge;
66789 +static int slub_nomerge = 1;
66790
66791 /*
66792 * Calculate the order of allocation given an slab object size.
66793 @@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
66794 * list to avoid pounding the page allocator excessively.
66795 */
66796 set_min_partial(s, ilog2(s->size));
66797 - s->refcount = 1;
66798 + atomic_set(&s->refcount, 1);
66799 #ifdef CONFIG_NUMA
66800 s->remote_node_defrag_ratio = 1000;
66801 #endif
66802 @@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
66803 void kmem_cache_destroy(struct kmem_cache *s)
66804 {
66805 down_write(&slub_lock);
66806 - s->refcount--;
66807 - if (!s->refcount) {
66808 + if (atomic_dec_and_test(&s->refcount)) {
66809 list_del(&s->list);
66810 if (kmem_cache_close(s)) {
66811 printk(KERN_ERR "SLUB %s: %s called for cache that "
66812 @@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
66813 EXPORT_SYMBOL(__kmalloc_node);
66814 #endif
66815
66816 +void check_object_size(const void *ptr, unsigned long n, bool to)
66817 +{
66818 +
66819 +#ifdef CONFIG_PAX_USERCOPY
66820 + struct page *page;
66821 + struct kmem_cache *s = NULL;
66822 + unsigned long offset;
66823 +
66824 + if (!n)
66825 + return;
66826 +
66827 + if (ZERO_OR_NULL_PTR(ptr))
66828 + goto report;
66829 +
66830 + if (!virt_addr_valid(ptr))
66831 + return;
66832 +
66833 + page = virt_to_head_page(ptr);
66834 +
66835 + if (!PageSlab(page)) {
66836 + if (object_is_on_stack(ptr, n) == -1)
66837 + goto report;
66838 + return;
66839 + }
66840 +
66841 + s = page->slab;
66842 + if (!(s->flags & SLAB_USERCOPY))
66843 + goto report;
66844 +
66845 + offset = (ptr - page_address(page)) % s->size;
66846 + if (offset <= s->objsize && n <= s->objsize - offset)
66847 + return;
66848 +
66849 +report:
66850 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
66851 +#endif
66852 +
66853 +}
66854 +EXPORT_SYMBOL(check_object_size);
66855 +
66856 size_t ksize(const void *object)
66857 {
66858 struct page *page;
66859 @@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
66860 int node;
66861
66862 list_add(&s->list, &slab_caches);
66863 - s->refcount = -1;
66864 + atomic_set(&s->refcount, -1);
66865
66866 for_each_node_state(node, N_NORMAL_MEMORY) {
66867 struct kmem_cache_node *n = get_node(s, node);
66868 @@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
66869
66870 /* Caches that are not of the two-to-the-power-of size */
66871 if (KMALLOC_MIN_SIZE <= 32) {
66872 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
66873 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
66874 caches++;
66875 }
66876
66877 if (KMALLOC_MIN_SIZE <= 64) {
66878 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
66879 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
66880 caches++;
66881 }
66882
66883 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
66884 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
66885 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
66886 caches++;
66887 }
66888
66889 @@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
66890 /*
66891 * We may have set a slab to be unmergeable during bootstrap.
66892 */
66893 - if (s->refcount < 0)
66894 + if (atomic_read(&s->refcount) < 0)
66895 return 1;
66896
66897 return 0;
66898 @@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
66899 down_write(&slub_lock);
66900 s = find_mergeable(size, align, flags, name, ctor);
66901 if (s) {
66902 - s->refcount++;
66903 + atomic_inc(&s->refcount);
66904 /*
66905 * Adjust the object sizes so that we clear
66906 * the complete object on kzalloc.
66907 @@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
66908 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
66909
66910 if (sysfs_slab_alias(s, name)) {
66911 - s->refcount--;
66912 + atomic_dec(&s->refcount);
66913 goto err;
66914 }
66915 up_write(&slub_lock);
66916 @@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
66917
66918 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
66919 {
66920 - return sprintf(buf, "%d\n", s->refcount - 1);
66921 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
66922 }
66923 SLAB_ATTR_RO(aliases);
66924
66925 @@ -4894,7 +4935,13 @@ static const struct file_operations proc
66926
66927 static int __init slab_proc_init(void)
66928 {
66929 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
66930 + mode_t gr_mode = S_IRUGO;
66931 +
66932 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66933 + gr_mode = S_IRUSR;
66934 +#endif
66935 +
66936 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
66937 return 0;
66938 }
66939 module_init(slab_proc_init);
66940 diff -urNp linux-3.0.4/mm/swap.c linux-3.0.4/mm/swap.c
66941 --- linux-3.0.4/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
66942 +++ linux-3.0.4/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
66943 @@ -31,6 +31,7 @@
66944 #include <linux/backing-dev.h>
66945 #include <linux/memcontrol.h>
66946 #include <linux/gfp.h>
66947 +#include <linux/hugetlb.h>
66948
66949 #include "internal.h"
66950
66951 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
66952
66953 __page_cache_release(page);
66954 dtor = get_compound_page_dtor(page);
66955 + if (!PageHuge(page))
66956 + BUG_ON(dtor != free_compound_page);
66957 (*dtor)(page);
66958 }
66959
66960 diff -urNp linux-3.0.4/mm/swapfile.c linux-3.0.4/mm/swapfile.c
66961 --- linux-3.0.4/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
66962 +++ linux-3.0.4/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
66963 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
66964
66965 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
66966 /* Activity counter to indicate that a swapon or swapoff has occurred */
66967 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
66968 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
66969
66970 static inline unsigned char swap_count(unsigned char ent)
66971 {
66972 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
66973 }
66974 filp_close(swap_file, NULL);
66975 err = 0;
66976 - atomic_inc(&proc_poll_event);
66977 + atomic_inc_unchecked(&proc_poll_event);
66978 wake_up_interruptible(&proc_poll_wait);
66979
66980 out_dput:
66981 @@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
66982
66983 poll_wait(file, &proc_poll_wait, wait);
66984
66985 - if (s->event != atomic_read(&proc_poll_event)) {
66986 - s->event = atomic_read(&proc_poll_event);
66987 + if (s->event != atomic_read_unchecked(&proc_poll_event)) {
66988 + s->event = atomic_read_unchecked(&proc_poll_event);
66989 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
66990 }
66991
66992 @@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
66993 }
66994
66995 s->seq.private = s;
66996 - s->event = atomic_read(&proc_poll_event);
66997 + s->event = atomic_read_unchecked(&proc_poll_event);
66998 return ret;
66999 }
67000
67001 @@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
67002 (p->flags & SWP_DISCARDABLE) ? "D" : "");
67003
67004 mutex_unlock(&swapon_mutex);
67005 - atomic_inc(&proc_poll_event);
67006 + atomic_inc_unchecked(&proc_poll_event);
67007 wake_up_interruptible(&proc_poll_wait);
67008
67009 if (S_ISREG(inode->i_mode))
67010 diff -urNp linux-3.0.4/mm/util.c linux-3.0.4/mm/util.c
67011 --- linux-3.0.4/mm/util.c 2011-07-21 22:17:23.000000000 -0400
67012 +++ linux-3.0.4/mm/util.c 2011-08-23 21:47:56.000000000 -0400
67013 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
67014 * allocated buffer. Use this if you don't want to free the buffer immediately
67015 * like, for example, with RCU.
67016 */
67017 +#undef __krealloc
67018 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
67019 {
67020 void *ret;
67021 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
67022 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
67023 * %NULL pointer, the object pointed to is freed.
67024 */
67025 +#undef krealloc
67026 void *krealloc(const void *p, size_t new_size, gfp_t flags)
67027 {
67028 void *ret;
67029 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
67030 void arch_pick_mmap_layout(struct mm_struct *mm)
67031 {
67032 mm->mmap_base = TASK_UNMAPPED_BASE;
67033 +
67034 +#ifdef CONFIG_PAX_RANDMMAP
67035 + if (mm->pax_flags & MF_PAX_RANDMMAP)
67036 + mm->mmap_base += mm->delta_mmap;
67037 +#endif
67038 +
67039 mm->get_unmapped_area = arch_get_unmapped_area;
67040 mm->unmap_area = arch_unmap_area;
67041 }
67042 diff -urNp linux-3.0.4/mm/vmalloc.c linux-3.0.4/mm/vmalloc.c
67043 --- linux-3.0.4/mm/vmalloc.c 2011-08-23 21:44:40.000000000 -0400
67044 +++ linux-3.0.4/mm/vmalloc.c 2011-08-23 21:47:56.000000000 -0400
67045 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
67046
67047 pte = pte_offset_kernel(pmd, addr);
67048 do {
67049 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67050 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67051 +
67052 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67053 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
67054 + BUG_ON(!pte_exec(*pte));
67055 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
67056 + continue;
67057 + }
67058 +#endif
67059 +
67060 + {
67061 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67062 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67063 + }
67064 } while (pte++, addr += PAGE_SIZE, addr != end);
67065 }
67066
67067 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
67068 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
67069 {
67070 pte_t *pte;
67071 + int ret = -ENOMEM;
67072
67073 /*
67074 * nr is a running index into the array which helps higher level
67075 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
67076 pte = pte_alloc_kernel(pmd, addr);
67077 if (!pte)
67078 return -ENOMEM;
67079 +
67080 + pax_open_kernel();
67081 do {
67082 struct page *page = pages[*nr];
67083
67084 - if (WARN_ON(!pte_none(*pte)))
67085 - return -EBUSY;
67086 - if (WARN_ON(!page))
67087 - return -ENOMEM;
67088 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67089 + if (pgprot_val(prot) & _PAGE_NX)
67090 +#endif
67091 +
67092 + if (WARN_ON(!pte_none(*pte))) {
67093 + ret = -EBUSY;
67094 + goto out;
67095 + }
67096 + if (WARN_ON(!page)) {
67097 + ret = -ENOMEM;
67098 + goto out;
67099 + }
67100 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
67101 (*nr)++;
67102 } while (pte++, addr += PAGE_SIZE, addr != end);
67103 - return 0;
67104 + ret = 0;
67105 +out:
67106 + pax_close_kernel();
67107 + return ret;
67108 }
67109
67110 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
67111 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
67112 * and fall back on vmalloc() if that fails. Others
67113 * just put it in the vmalloc space.
67114 */
67115 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
67116 +#ifdef CONFIG_MODULES
67117 +#ifdef MODULES_VADDR
67118 unsigned long addr = (unsigned long)x;
67119 if (addr >= MODULES_VADDR && addr < MODULES_END)
67120 return 1;
67121 #endif
67122 +
67123 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67124 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
67125 + return 1;
67126 +#endif
67127 +
67128 +#endif
67129 +
67130 return is_vmalloc_addr(x);
67131 }
67132
67133 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
67134
67135 if (!pgd_none(*pgd)) {
67136 pud_t *pud = pud_offset(pgd, addr);
67137 +#ifdef CONFIG_X86
67138 + if (!pud_large(*pud))
67139 +#endif
67140 if (!pud_none(*pud)) {
67141 pmd_t *pmd = pmd_offset(pud, addr);
67142 +#ifdef CONFIG_X86
67143 + if (!pmd_large(*pmd))
67144 +#endif
67145 if (!pmd_none(*pmd)) {
67146 pte_t *ptep, pte;
67147
67148 @@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
67149 struct vm_struct *area;
67150
67151 BUG_ON(in_interrupt());
67152 +
67153 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67154 + if (flags & VM_KERNEXEC) {
67155 + if (start != VMALLOC_START || end != VMALLOC_END)
67156 + return NULL;
67157 + start = (unsigned long)MODULES_EXEC_VADDR;
67158 + end = (unsigned long)MODULES_EXEC_END;
67159 + }
67160 +#endif
67161 +
67162 if (flags & VM_IOREMAP) {
67163 int bit = fls(size);
67164
67165 @@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
67166 if (count > totalram_pages)
67167 return NULL;
67168
67169 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67170 + if (!(pgprot_val(prot) & _PAGE_NX))
67171 + flags |= VM_KERNEXEC;
67172 +#endif
67173 +
67174 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
67175 __builtin_return_address(0));
67176 if (!area)
67177 @@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
67178 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
67179 return NULL;
67180
67181 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67182 + if (!(pgprot_val(prot) & _PAGE_NX))
67183 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
67184 + node, gfp_mask, caller);
67185 + else
67186 +#endif
67187 +
67188 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
67189 gfp_mask, caller);
67190
67191 @@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
67192 gfp_mask, prot, node, caller);
67193 }
67194
67195 +#undef __vmalloc
67196 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
67197 {
67198 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
67199 @@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
67200 * For tight control over page level allocator and protection flags
67201 * use __vmalloc() instead.
67202 */
67203 +#undef vmalloc
67204 void *vmalloc(unsigned long size)
67205 {
67206 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
67207 @@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
67208 * For tight control over page level allocator and protection flags
67209 * use __vmalloc() instead.
67210 */
67211 +#undef vzalloc
67212 void *vzalloc(unsigned long size)
67213 {
67214 return __vmalloc_node_flags(size, -1,
67215 @@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
67216 * The resulting memory area is zeroed so it can be mapped to userspace
67217 * without leaking data.
67218 */
67219 +#undef vmalloc_user
67220 void *vmalloc_user(unsigned long size)
67221 {
67222 struct vm_struct *area;
67223 @@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
67224 * For tight control over page level allocator and protection flags
67225 * use __vmalloc() instead.
67226 */
67227 +#undef vmalloc_node
67228 void *vmalloc_node(unsigned long size, int node)
67229 {
67230 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67231 @@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
67232 * For tight control over page level allocator and protection flags
67233 * use __vmalloc_node() instead.
67234 */
67235 +#undef vzalloc_node
67236 void *vzalloc_node(unsigned long size, int node)
67237 {
67238 return __vmalloc_node_flags(size, node,
67239 @@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
67240 * For tight control over page level allocator and protection flags
67241 * use __vmalloc() instead.
67242 */
67243 -
67244 +#undef vmalloc_exec
67245 void *vmalloc_exec(unsigned long size)
67246 {
67247 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
67248 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
67249 -1, __builtin_return_address(0));
67250 }
67251
67252 @@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
67253 * Allocate enough 32bit PA addressable pages to cover @size from the
67254 * page level allocator and map them into contiguous kernel virtual space.
67255 */
67256 +#undef vmalloc_32
67257 void *vmalloc_32(unsigned long size)
67258 {
67259 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
67260 @@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
67261 * The resulting memory area is 32bit addressable and zeroed so it can be
67262 * mapped to userspace without leaking data.
67263 */
67264 +#undef vmalloc_32_user
67265 void *vmalloc_32_user(unsigned long size)
67266 {
67267 struct vm_struct *area;
67268 @@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
67269 unsigned long uaddr = vma->vm_start;
67270 unsigned long usize = vma->vm_end - vma->vm_start;
67271
67272 + BUG_ON(vma->vm_mirror);
67273 +
67274 if ((PAGE_SIZE-1) & (unsigned long)addr)
67275 return -EINVAL;
67276
67277 diff -urNp linux-3.0.4/mm/vmstat.c linux-3.0.4/mm/vmstat.c
67278 --- linux-3.0.4/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
67279 +++ linux-3.0.4/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
67280 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
67281 *
67282 * vm_stat contains the global counters
67283 */
67284 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67285 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67286 EXPORT_SYMBOL(vm_stat);
67287
67288 #ifdef CONFIG_SMP
67289 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
67290 v = p->vm_stat_diff[i];
67291 p->vm_stat_diff[i] = 0;
67292 local_irq_restore(flags);
67293 - atomic_long_add(v, &zone->vm_stat[i]);
67294 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
67295 global_diff[i] += v;
67296 #ifdef CONFIG_NUMA
67297 /* 3 seconds idle till flush */
67298 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
67299
67300 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
67301 if (global_diff[i])
67302 - atomic_long_add(global_diff[i], &vm_stat[i]);
67303 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
67304 }
67305
67306 #endif
67307 @@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
67308 start_cpu_timer(cpu);
67309 #endif
67310 #ifdef CONFIG_PROC_FS
67311 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
67312 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
67313 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
67314 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
67315 + {
67316 + mode_t gr_mode = S_IRUGO;
67317 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67318 + gr_mode = S_IRUSR;
67319 +#endif
67320 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
67321 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
67322 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67323 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
67324 +#else
67325 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
67326 +#endif
67327 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
67328 + }
67329 #endif
67330 return 0;
67331 }
67332 diff -urNp linux-3.0.4/net/8021q/vlan.c linux-3.0.4/net/8021q/vlan.c
67333 --- linux-3.0.4/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
67334 +++ linux-3.0.4/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
67335 @@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
67336 err = -EPERM;
67337 if (!capable(CAP_NET_ADMIN))
67338 break;
67339 - if ((args.u.name_type >= 0) &&
67340 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
67341 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
67342 struct vlan_net *vn;
67343
67344 vn = net_generic(net, vlan_net_id);
67345 diff -urNp linux-3.0.4/net/atm/atm_misc.c linux-3.0.4/net/atm/atm_misc.c
67346 --- linux-3.0.4/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
67347 +++ linux-3.0.4/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
67348 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
67349 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
67350 return 1;
67351 atm_return(vcc, truesize);
67352 - atomic_inc(&vcc->stats->rx_drop);
67353 + atomic_inc_unchecked(&vcc->stats->rx_drop);
67354 return 0;
67355 }
67356 EXPORT_SYMBOL(atm_charge);
67357 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
67358 }
67359 }
67360 atm_return(vcc, guess);
67361 - atomic_inc(&vcc->stats->rx_drop);
67362 + atomic_inc_unchecked(&vcc->stats->rx_drop);
67363 return NULL;
67364 }
67365 EXPORT_SYMBOL(atm_alloc_charge);
67366 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
67367
67368 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
67369 {
67370 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67371 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67372 __SONET_ITEMS
67373 #undef __HANDLE_ITEM
67374 }
67375 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
67376
67377 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
67378 {
67379 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67380 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
67381 __SONET_ITEMS
67382 #undef __HANDLE_ITEM
67383 }
67384 diff -urNp linux-3.0.4/net/atm/lec.h linux-3.0.4/net/atm/lec.h
67385 --- linux-3.0.4/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
67386 +++ linux-3.0.4/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
67387 @@ -48,7 +48,7 @@ struct lane2_ops {
67388 const u8 *tlvs, u32 sizeoftlvs);
67389 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
67390 const u8 *tlvs, u32 sizeoftlvs);
67391 -};
67392 +} __no_const;
67393
67394 /*
67395 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
67396 diff -urNp linux-3.0.4/net/atm/mpc.h linux-3.0.4/net/atm/mpc.h
67397 --- linux-3.0.4/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
67398 +++ linux-3.0.4/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
67399 @@ -33,7 +33,7 @@ struct mpoa_client {
67400 struct mpc_parameters parameters; /* parameters for this client */
67401
67402 const struct net_device_ops *old_ops;
67403 - struct net_device_ops new_ops;
67404 + net_device_ops_no_const new_ops;
67405 };
67406
67407
67408 diff -urNp linux-3.0.4/net/atm/mpoa_caches.c linux-3.0.4/net/atm/mpoa_caches.c
67409 --- linux-3.0.4/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
67410 +++ linux-3.0.4/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
67411 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
67412 struct timeval now;
67413 struct k_message msg;
67414
67415 + pax_track_stack();
67416 +
67417 do_gettimeofday(&now);
67418
67419 read_lock_bh(&client->ingress_lock);
67420 diff -urNp linux-3.0.4/net/atm/proc.c linux-3.0.4/net/atm/proc.c
67421 --- linux-3.0.4/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
67422 +++ linux-3.0.4/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
67423 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
67424 const struct k_atm_aal_stats *stats)
67425 {
67426 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
67427 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
67428 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
67429 - atomic_read(&stats->rx_drop));
67430 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
67431 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
67432 + atomic_read_unchecked(&stats->rx_drop));
67433 }
67434
67435 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
67436 diff -urNp linux-3.0.4/net/atm/resources.c linux-3.0.4/net/atm/resources.c
67437 --- linux-3.0.4/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
67438 +++ linux-3.0.4/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
67439 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
67440 static void copy_aal_stats(struct k_atm_aal_stats *from,
67441 struct atm_aal_stats *to)
67442 {
67443 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67444 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67445 __AAL_STAT_ITEMS
67446 #undef __HANDLE_ITEM
67447 }
67448 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
67449 static void subtract_aal_stats(struct k_atm_aal_stats *from,
67450 struct atm_aal_stats *to)
67451 {
67452 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67453 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
67454 __AAL_STAT_ITEMS
67455 #undef __HANDLE_ITEM
67456 }
67457 diff -urNp linux-3.0.4/net/batman-adv/hard-interface.c linux-3.0.4/net/batman-adv/hard-interface.c
67458 --- linux-3.0.4/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
67459 +++ linux-3.0.4/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
67460 @@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
67461 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
67462 dev_add_pack(&hard_iface->batman_adv_ptype);
67463
67464 - atomic_set(&hard_iface->seqno, 1);
67465 - atomic_set(&hard_iface->frag_seqno, 1);
67466 + atomic_set_unchecked(&hard_iface->seqno, 1);
67467 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
67468 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
67469 hard_iface->net_dev->name);
67470
67471 diff -urNp linux-3.0.4/net/batman-adv/routing.c linux-3.0.4/net/batman-adv/routing.c
67472 --- linux-3.0.4/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
67473 +++ linux-3.0.4/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
67474 @@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
67475 return;
67476
67477 /* could be changed by schedule_own_packet() */
67478 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
67479 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
67480
67481 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
67482
67483 diff -urNp linux-3.0.4/net/batman-adv/send.c linux-3.0.4/net/batman-adv/send.c
67484 --- linux-3.0.4/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
67485 +++ linux-3.0.4/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
67486 @@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
67487
67488 /* change sequence number to network order */
67489 batman_packet->seqno =
67490 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
67491 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
67492
67493 if (vis_server == VIS_TYPE_SERVER_SYNC)
67494 batman_packet->flags |= VIS_SERVER;
67495 @@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
67496 else
67497 batman_packet->gw_flags = 0;
67498
67499 - atomic_inc(&hard_iface->seqno);
67500 + atomic_inc_unchecked(&hard_iface->seqno);
67501
67502 slide_own_bcast_window(hard_iface);
67503 send_time = own_send_time(bat_priv);
67504 diff -urNp linux-3.0.4/net/batman-adv/soft-interface.c linux-3.0.4/net/batman-adv/soft-interface.c
67505 --- linux-3.0.4/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
67506 +++ linux-3.0.4/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
67507 @@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
67508
67509 /* set broadcast sequence number */
67510 bcast_packet->seqno =
67511 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
67512 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
67513
67514 add_bcast_packet_to_list(bat_priv, skb);
67515
67516 @@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
67517 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
67518
67519 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
67520 - atomic_set(&bat_priv->bcast_seqno, 1);
67521 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
67522 atomic_set(&bat_priv->tt_local_changed, 0);
67523
67524 bat_priv->primary_if = NULL;
67525 diff -urNp linux-3.0.4/net/batman-adv/types.h linux-3.0.4/net/batman-adv/types.h
67526 --- linux-3.0.4/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
67527 +++ linux-3.0.4/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
67528 @@ -38,8 +38,8 @@ struct hard_iface {
67529 int16_t if_num;
67530 char if_status;
67531 struct net_device *net_dev;
67532 - atomic_t seqno;
67533 - atomic_t frag_seqno;
67534 + atomic_unchecked_t seqno;
67535 + atomic_unchecked_t frag_seqno;
67536 unsigned char *packet_buff;
67537 int packet_len;
67538 struct kobject *hardif_obj;
67539 @@ -142,7 +142,7 @@ struct bat_priv {
67540 atomic_t orig_interval; /* uint */
67541 atomic_t hop_penalty; /* uint */
67542 atomic_t log_level; /* uint */
67543 - atomic_t bcast_seqno;
67544 + atomic_unchecked_t bcast_seqno;
67545 atomic_t bcast_queue_left;
67546 atomic_t batman_queue_left;
67547 char num_ifaces;
67548 diff -urNp linux-3.0.4/net/batman-adv/unicast.c linux-3.0.4/net/batman-adv/unicast.c
67549 --- linux-3.0.4/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
67550 +++ linux-3.0.4/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
67551 @@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
67552 frag1->flags = UNI_FRAG_HEAD | large_tail;
67553 frag2->flags = large_tail;
67554
67555 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
67556 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
67557 frag1->seqno = htons(seqno - 1);
67558 frag2->seqno = htons(seqno);
67559
67560 diff -urNp linux-3.0.4/net/bridge/br_multicast.c linux-3.0.4/net/bridge/br_multicast.c
67561 --- linux-3.0.4/net/bridge/br_multicast.c 2011-07-21 22:17:23.000000000 -0400
67562 +++ linux-3.0.4/net/bridge/br_multicast.c 2011-08-23 21:47:56.000000000 -0400
67563 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
67564 nexthdr = ip6h->nexthdr;
67565 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
67566
67567 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
67568 + if (nexthdr != IPPROTO_ICMPV6)
67569 return 0;
67570
67571 /* Okay, we found ICMPv6 header */
67572 diff -urNp linux-3.0.4/net/bridge/netfilter/ebtables.c linux-3.0.4/net/bridge/netfilter/ebtables.c
67573 --- linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
67574 +++ linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
67575 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
67576 tmp.valid_hooks = t->table->valid_hooks;
67577 }
67578 mutex_unlock(&ebt_mutex);
67579 - if (copy_to_user(user, &tmp, *len) != 0){
67580 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
67581 BUGPRINT("c2u Didn't work\n");
67582 ret = -EFAULT;
67583 break;
67584 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
67585 int ret;
67586 void __user *pos;
67587
67588 + pax_track_stack();
67589 +
67590 memset(&tinfo, 0, sizeof(tinfo));
67591
67592 if (cmd == EBT_SO_GET_ENTRIES) {
67593 diff -urNp linux-3.0.4/net/caif/caif_socket.c linux-3.0.4/net/caif/caif_socket.c
67594 --- linux-3.0.4/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
67595 +++ linux-3.0.4/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
67596 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
67597 #ifdef CONFIG_DEBUG_FS
67598 struct debug_fs_counter {
67599 atomic_t caif_nr_socks;
67600 - atomic_t caif_sock_create;
67601 - atomic_t num_connect_req;
67602 - atomic_t num_connect_resp;
67603 - atomic_t num_connect_fail_resp;
67604 - atomic_t num_disconnect;
67605 - atomic_t num_remote_shutdown_ind;
67606 - atomic_t num_tx_flow_off_ind;
67607 - atomic_t num_tx_flow_on_ind;
67608 - atomic_t num_rx_flow_off;
67609 - atomic_t num_rx_flow_on;
67610 + atomic_unchecked_t caif_sock_create;
67611 + atomic_unchecked_t num_connect_req;
67612 + atomic_unchecked_t num_connect_resp;
67613 + atomic_unchecked_t num_connect_fail_resp;
67614 + atomic_unchecked_t num_disconnect;
67615 + atomic_unchecked_t num_remote_shutdown_ind;
67616 + atomic_unchecked_t num_tx_flow_off_ind;
67617 + atomic_unchecked_t num_tx_flow_on_ind;
67618 + atomic_unchecked_t num_rx_flow_off;
67619 + atomic_unchecked_t num_rx_flow_on;
67620 };
67621 static struct debug_fs_counter cnt;
67622 #define dbfs_atomic_inc(v) atomic_inc_return(v)
67623 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
67624 #define dbfs_atomic_dec(v) atomic_dec_return(v)
67625 #else
67626 #define dbfs_atomic_inc(v) 0
67627 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
67628 atomic_read(&cf_sk->sk.sk_rmem_alloc),
67629 sk_rcvbuf_lowwater(cf_sk));
67630 set_rx_flow_off(cf_sk);
67631 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
67632 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
67633 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
67634 }
67635
67636 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
67637 set_rx_flow_off(cf_sk);
67638 if (net_ratelimit())
67639 pr_debug("sending flow OFF due to rmem_schedule\n");
67640 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
67641 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
67642 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
67643 }
67644 skb->dev = NULL;
67645 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
67646 switch (flow) {
67647 case CAIF_CTRLCMD_FLOW_ON_IND:
67648 /* OK from modem to start sending again */
67649 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
67650 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
67651 set_tx_flow_on(cf_sk);
67652 cf_sk->sk.sk_state_change(&cf_sk->sk);
67653 break;
67654
67655 case CAIF_CTRLCMD_FLOW_OFF_IND:
67656 /* Modem asks us to shut up */
67657 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
67658 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
67659 set_tx_flow_off(cf_sk);
67660 cf_sk->sk.sk_state_change(&cf_sk->sk);
67661 break;
67662 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
67663 /* We're now connected */
67664 caif_client_register_refcnt(&cf_sk->layer,
67665 cfsk_hold, cfsk_put);
67666 - dbfs_atomic_inc(&cnt.num_connect_resp);
67667 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
67668 cf_sk->sk.sk_state = CAIF_CONNECTED;
67669 set_tx_flow_on(cf_sk);
67670 cf_sk->sk.sk_state_change(&cf_sk->sk);
67671 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
67672
67673 case CAIF_CTRLCMD_INIT_FAIL_RSP:
67674 /* Connect request failed */
67675 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
67676 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
67677 cf_sk->sk.sk_err = ECONNREFUSED;
67678 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
67679 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
67680 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
67681
67682 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
67683 /* Modem has closed this connection, or device is down. */
67684 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
67685 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
67686 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
67687 cf_sk->sk.sk_err = ECONNRESET;
67688 set_rx_flow_on(cf_sk);
67689 @@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
67690 return;
67691
67692 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
67693 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
67694 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
67695 set_rx_flow_on(cf_sk);
67696 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
67697 }
67698 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
67699 /*ifindex = id of the interface.*/
67700 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
67701
67702 - dbfs_atomic_inc(&cnt.num_connect_req);
67703 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
67704 cf_sk->layer.receive = caif_sktrecv_cb;
67705
67706 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
67707 @@ -943,7 +944,7 @@ static int caif_release(struct socket *s
67708 spin_unlock_bh(&sk->sk_receive_queue.lock);
67709 sock->sk = NULL;
67710
67711 - dbfs_atomic_inc(&cnt.num_disconnect);
67712 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
67713
67714 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
67715 if (cf_sk->debugfs_socket_dir != NULL)
67716 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
67717 cf_sk->conn_req.protocol = protocol;
67718 /* Increase the number of sockets created. */
67719 dbfs_atomic_inc(&cnt.caif_nr_socks);
67720 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
67721 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
67722 #ifdef CONFIG_DEBUG_FS
67723 if (!IS_ERR(debugfsdir)) {
67724
67725 diff -urNp linux-3.0.4/net/caif/cfctrl.c linux-3.0.4/net/caif/cfctrl.c
67726 --- linux-3.0.4/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
67727 +++ linux-3.0.4/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
67728 @@ -9,6 +9,7 @@
67729 #include <linux/stddef.h>
67730 #include <linux/spinlock.h>
67731 #include <linux/slab.h>
67732 +#include <linux/sched.h>
67733 #include <net/caif/caif_layer.h>
67734 #include <net/caif/cfpkt.h>
67735 #include <net/caif/cfctrl.h>
67736 @@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
67737 dev_info.id = 0xff;
67738 memset(this, 0, sizeof(*this));
67739 cfsrvl_init(&this->serv, 0, &dev_info, false);
67740 - atomic_set(&this->req_seq_no, 1);
67741 - atomic_set(&this->rsp_seq_no, 1);
67742 + atomic_set_unchecked(&this->req_seq_no, 1);
67743 + atomic_set_unchecked(&this->rsp_seq_no, 1);
67744 this->serv.layer.receive = cfctrl_recv;
67745 sprintf(this->serv.layer.name, "ctrl");
67746 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
67747 @@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
67748 struct cfctrl_request_info *req)
67749 {
67750 spin_lock_bh(&ctrl->info_list_lock);
67751 - atomic_inc(&ctrl->req_seq_no);
67752 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
67753 + atomic_inc_unchecked(&ctrl->req_seq_no);
67754 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
67755 list_add_tail(&req->list, &ctrl->list);
67756 spin_unlock_bh(&ctrl->info_list_lock);
67757 }
67758 @@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
67759 if (p != first)
67760 pr_warn("Requests are not received in order\n");
67761
67762 - atomic_set(&ctrl->rsp_seq_no,
67763 + atomic_set_unchecked(&ctrl->rsp_seq_no,
67764 p->sequence_no);
67765 list_del(&p->list);
67766 goto out;
67767 @@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
67768 struct cfctrl *cfctrl = container_obj(layer);
67769 struct cfctrl_request_info rsp, *req;
67770
67771 + pax_track_stack();
67772
67773 cfpkt_extr_head(pkt, &cmdrsp, 1);
67774 cmd = cmdrsp & CFCTRL_CMD_MASK;
67775 diff -urNp linux-3.0.4/net/core/datagram.c linux-3.0.4/net/core/datagram.c
67776 --- linux-3.0.4/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
67777 +++ linux-3.0.4/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
67778 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
67779 }
67780
67781 kfree_skb(skb);
67782 - atomic_inc(&sk->sk_drops);
67783 + atomic_inc_unchecked(&sk->sk_drops);
67784 sk_mem_reclaim_partial(sk);
67785
67786 return err;
67787 diff -urNp linux-3.0.4/net/core/dev.c linux-3.0.4/net/core/dev.c
67788 --- linux-3.0.4/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
67789 +++ linux-3.0.4/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
67790 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
67791 if (no_module && capable(CAP_NET_ADMIN))
67792 no_module = request_module("netdev-%s", name);
67793 if (no_module && capable(CAP_SYS_MODULE)) {
67794 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67795 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
67796 +#else
67797 if (!request_module("%s", name))
67798 pr_err("Loading kernel module for a network device "
67799 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
67800 "instead\n", name);
67801 +#endif
67802 }
67803 }
67804 EXPORT_SYMBOL(dev_load);
67805 @@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
67806
67807 struct dev_gso_cb {
67808 void (*destructor)(struct sk_buff *skb);
67809 -};
67810 +} __no_const;
67811
67812 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
67813
67814 @@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
67815 }
67816 EXPORT_SYMBOL(netif_rx_ni);
67817
67818 -static void net_tx_action(struct softirq_action *h)
67819 +static void net_tx_action(void)
67820 {
67821 struct softnet_data *sd = &__get_cpu_var(softnet_data);
67822
67823 @@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
67824 }
67825 EXPORT_SYMBOL(netif_napi_del);
67826
67827 -static void net_rx_action(struct softirq_action *h)
67828 +static void net_rx_action(void)
67829 {
67830 struct softnet_data *sd = &__get_cpu_var(softnet_data);
67831 unsigned long time_limit = jiffies + 2;
67832 diff -urNp linux-3.0.4/net/core/flow.c linux-3.0.4/net/core/flow.c
67833 --- linux-3.0.4/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
67834 +++ linux-3.0.4/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
67835 @@ -60,7 +60,7 @@ struct flow_cache {
67836 struct timer_list rnd_timer;
67837 };
67838
67839 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
67840 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
67841 EXPORT_SYMBOL(flow_cache_genid);
67842 static struct flow_cache flow_cache_global;
67843 static struct kmem_cache *flow_cachep __read_mostly;
67844 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
67845
67846 static int flow_entry_valid(struct flow_cache_entry *fle)
67847 {
67848 - if (atomic_read(&flow_cache_genid) != fle->genid)
67849 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
67850 return 0;
67851 if (fle->object && !fle->object->ops->check(fle->object))
67852 return 0;
67853 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
67854 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
67855 fcp->hash_count++;
67856 }
67857 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
67858 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
67859 flo = fle->object;
67860 if (!flo)
67861 goto ret_object;
67862 @@ -274,7 +274,7 @@ nocache:
67863 }
67864 flo = resolver(net, key, family, dir, flo, ctx);
67865 if (fle) {
67866 - fle->genid = atomic_read(&flow_cache_genid);
67867 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
67868 if (!IS_ERR(flo))
67869 fle->object = flo;
67870 else
67871 diff -urNp linux-3.0.4/net/core/rtnetlink.c linux-3.0.4/net/core/rtnetlink.c
67872 --- linux-3.0.4/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
67873 +++ linux-3.0.4/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
67874 @@ -56,7 +56,7 @@
67875 struct rtnl_link {
67876 rtnl_doit_func doit;
67877 rtnl_dumpit_func dumpit;
67878 -};
67879 +} __no_const;
67880
67881 static DEFINE_MUTEX(rtnl_mutex);
67882
67883 diff -urNp linux-3.0.4/net/core/skbuff.c linux-3.0.4/net/core/skbuff.c
67884 --- linux-3.0.4/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
67885 +++ linux-3.0.4/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
67886 @@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
67887 struct sock *sk = skb->sk;
67888 int ret = 0;
67889
67890 + pax_track_stack();
67891 +
67892 if (splice_grow_spd(pipe, &spd))
67893 return -ENOMEM;
67894
67895 diff -urNp linux-3.0.4/net/core/sock.c linux-3.0.4/net/core/sock.c
67896 --- linux-3.0.4/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
67897 +++ linux-3.0.4/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
67898 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
67899 */
67900 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
67901 (unsigned)sk->sk_rcvbuf) {
67902 - atomic_inc(&sk->sk_drops);
67903 + atomic_inc_unchecked(&sk->sk_drops);
67904 return -ENOMEM;
67905 }
67906
67907 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
67908 return err;
67909
67910 if (!sk_rmem_schedule(sk, skb->truesize)) {
67911 - atomic_inc(&sk->sk_drops);
67912 + atomic_inc_unchecked(&sk->sk_drops);
67913 return -ENOBUFS;
67914 }
67915
67916 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
67917 skb_dst_force(skb);
67918
67919 spin_lock_irqsave(&list->lock, flags);
67920 - skb->dropcount = atomic_read(&sk->sk_drops);
67921 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
67922 __skb_queue_tail(list, skb);
67923 spin_unlock_irqrestore(&list->lock, flags);
67924
67925 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
67926 skb->dev = NULL;
67927
67928 if (sk_rcvqueues_full(sk, skb)) {
67929 - atomic_inc(&sk->sk_drops);
67930 + atomic_inc_unchecked(&sk->sk_drops);
67931 goto discard_and_relse;
67932 }
67933 if (nested)
67934 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
67935 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
67936 } else if (sk_add_backlog(sk, skb)) {
67937 bh_unlock_sock(sk);
67938 - atomic_inc(&sk->sk_drops);
67939 + atomic_inc_unchecked(&sk->sk_drops);
67940 goto discard_and_relse;
67941 }
67942
67943 @@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
67944 if (len > sizeof(peercred))
67945 len = sizeof(peercred);
67946 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
67947 - if (copy_to_user(optval, &peercred, len))
67948 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
67949 return -EFAULT;
67950 goto lenout;
67951 }
67952 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
67953 return -ENOTCONN;
67954 if (lv < len)
67955 return -EINVAL;
67956 - if (copy_to_user(optval, address, len))
67957 + if (len > sizeof(address) || copy_to_user(optval, address, len))
67958 return -EFAULT;
67959 goto lenout;
67960 }
67961 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
67962
67963 if (len > lv)
67964 len = lv;
67965 - if (copy_to_user(optval, &v, len))
67966 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
67967 return -EFAULT;
67968 lenout:
67969 if (put_user(len, optlen))
67970 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
67971 */
67972 smp_wmb();
67973 atomic_set(&sk->sk_refcnt, 1);
67974 - atomic_set(&sk->sk_drops, 0);
67975 + atomic_set_unchecked(&sk->sk_drops, 0);
67976 }
67977 EXPORT_SYMBOL(sock_init_data);
67978
67979 diff -urNp linux-3.0.4/net/decnet/sysctl_net_decnet.c linux-3.0.4/net/decnet/sysctl_net_decnet.c
67980 --- linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
67981 +++ linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
67982 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
67983
67984 if (len > *lenp) len = *lenp;
67985
67986 - if (copy_to_user(buffer, addr, len))
67987 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
67988 return -EFAULT;
67989
67990 *lenp = len;
67991 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
67992
67993 if (len > *lenp) len = *lenp;
67994
67995 - if (copy_to_user(buffer, devname, len))
67996 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
67997 return -EFAULT;
67998
67999 *lenp = len;
68000 diff -urNp linux-3.0.4/net/econet/Kconfig linux-3.0.4/net/econet/Kconfig
68001 --- linux-3.0.4/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
68002 +++ linux-3.0.4/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
68003 @@ -4,7 +4,7 @@
68004
68005 config ECONET
68006 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
68007 - depends on EXPERIMENTAL && INET
68008 + depends on EXPERIMENTAL && INET && BROKEN
68009 ---help---
68010 Econet is a fairly old and slow networking protocol mainly used by
68011 Acorn computers to access file and print servers. It uses native
68012 diff -urNp linux-3.0.4/net/ipv4/fib_frontend.c linux-3.0.4/net/ipv4/fib_frontend.c
68013 --- linux-3.0.4/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
68014 +++ linux-3.0.4/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
68015 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
68016 #ifdef CONFIG_IP_ROUTE_MULTIPATH
68017 fib_sync_up(dev);
68018 #endif
68019 - atomic_inc(&net->ipv4.dev_addr_genid);
68020 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68021 rt_cache_flush(dev_net(dev), -1);
68022 break;
68023 case NETDEV_DOWN:
68024 fib_del_ifaddr(ifa, NULL);
68025 - atomic_inc(&net->ipv4.dev_addr_genid);
68026 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68027 if (ifa->ifa_dev->ifa_list == NULL) {
68028 /* Last address was deleted from this interface.
68029 * Disable IP.
68030 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
68031 #ifdef CONFIG_IP_ROUTE_MULTIPATH
68032 fib_sync_up(dev);
68033 #endif
68034 - atomic_inc(&net->ipv4.dev_addr_genid);
68035 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68036 rt_cache_flush(dev_net(dev), -1);
68037 break;
68038 case NETDEV_DOWN:
68039 diff -urNp linux-3.0.4/net/ipv4/fib_semantics.c linux-3.0.4/net/ipv4/fib_semantics.c
68040 --- linux-3.0.4/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
68041 +++ linux-3.0.4/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
68042 @@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
68043 nh->nh_saddr = inet_select_addr(nh->nh_dev,
68044 nh->nh_gw,
68045 nh->nh_parent->fib_scope);
68046 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
68047 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
68048
68049 return nh->nh_saddr;
68050 }
68051 diff -urNp linux-3.0.4/net/ipv4/inet_diag.c linux-3.0.4/net/ipv4/inet_diag.c
68052 --- linux-3.0.4/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
68053 +++ linux-3.0.4/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
68054 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
68055 r->idiag_retrans = 0;
68056
68057 r->id.idiag_if = sk->sk_bound_dev_if;
68058 +
68059 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68060 + r->id.idiag_cookie[0] = 0;
68061 + r->id.idiag_cookie[1] = 0;
68062 +#else
68063 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
68064 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
68065 +#endif
68066
68067 r->id.idiag_sport = inet->inet_sport;
68068 r->id.idiag_dport = inet->inet_dport;
68069 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
68070 r->idiag_family = tw->tw_family;
68071 r->idiag_retrans = 0;
68072 r->id.idiag_if = tw->tw_bound_dev_if;
68073 +
68074 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68075 + r->id.idiag_cookie[0] = 0;
68076 + r->id.idiag_cookie[1] = 0;
68077 +#else
68078 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
68079 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
68080 +#endif
68081 +
68082 r->id.idiag_sport = tw->tw_sport;
68083 r->id.idiag_dport = tw->tw_dport;
68084 r->id.idiag_src[0] = tw->tw_rcv_saddr;
68085 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
68086 if (sk == NULL)
68087 goto unlock;
68088
68089 +#ifndef CONFIG_GRKERNSEC_HIDESYM
68090 err = -ESTALE;
68091 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
68092 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
68093 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
68094 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
68095 goto out;
68096 +#endif
68097
68098 err = -ENOMEM;
68099 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
68100 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
68101 r->idiag_retrans = req->retrans;
68102
68103 r->id.idiag_if = sk->sk_bound_dev_if;
68104 +
68105 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68106 + r->id.idiag_cookie[0] = 0;
68107 + r->id.idiag_cookie[1] = 0;
68108 +#else
68109 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
68110 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
68111 +#endif
68112
68113 tmo = req->expires - jiffies;
68114 if (tmo < 0)
68115 diff -urNp linux-3.0.4/net/ipv4/inet_hashtables.c linux-3.0.4/net/ipv4/inet_hashtables.c
68116 --- linux-3.0.4/net/ipv4/inet_hashtables.c 2011-08-23 21:44:40.000000000 -0400
68117 +++ linux-3.0.4/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
68118 @@ -18,12 +18,15 @@
68119 #include <linux/sched.h>
68120 #include <linux/slab.h>
68121 #include <linux/wait.h>
68122 +#include <linux/security.h>
68123
68124 #include <net/inet_connection_sock.h>
68125 #include <net/inet_hashtables.h>
68126 #include <net/secure_seq.h>
68127 #include <net/ip.h>
68128
68129 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
68130 +
68131 /*
68132 * Allocate and initialize a new local port bind bucket.
68133 * The bindhash mutex for snum's hash chain must be held here.
68134 @@ -530,6 +533,8 @@ ok:
68135 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
68136 spin_unlock(&head->lock);
68137
68138 + gr_update_task_in_ip_table(current, inet_sk(sk));
68139 +
68140 if (tw) {
68141 inet_twsk_deschedule(tw, death_row);
68142 while (twrefcnt) {
68143 diff -urNp linux-3.0.4/net/ipv4/inetpeer.c linux-3.0.4/net/ipv4/inetpeer.c
68144 --- linux-3.0.4/net/ipv4/inetpeer.c 2011-08-23 21:44:40.000000000 -0400
68145 +++ linux-3.0.4/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
68146 @@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
68147 unsigned int sequence;
68148 int invalidated, newrefcnt = 0;
68149
68150 + pax_track_stack();
68151 +
68152 /* Look up for the address quickly, lockless.
68153 * Because of a concurrent writer, we might not find an existing entry.
68154 */
68155 @@ -517,8 +519,8 @@ found: /* The existing node has been fo
68156 if (p) {
68157 p->daddr = *daddr;
68158 atomic_set(&p->refcnt, 1);
68159 - atomic_set(&p->rid, 0);
68160 - atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
68161 + atomic_set_unchecked(&p->rid, 0);
68162 + atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
68163 p->tcp_ts_stamp = 0;
68164 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
68165 p->rate_tokens = 0;
68166 diff -urNp linux-3.0.4/net/ipv4/ip_fragment.c linux-3.0.4/net/ipv4/ip_fragment.c
68167 --- linux-3.0.4/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
68168 +++ linux-3.0.4/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
68169 @@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
68170 return 0;
68171
68172 start = qp->rid;
68173 - end = atomic_inc_return(&peer->rid);
68174 + end = atomic_inc_return_unchecked(&peer->rid);
68175 qp->rid = end;
68176
68177 rc = qp->q.fragments && (end - start) > max;
68178 diff -urNp linux-3.0.4/net/ipv4/ip_sockglue.c linux-3.0.4/net/ipv4/ip_sockglue.c
68179 --- linux-3.0.4/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
68180 +++ linux-3.0.4/net/ipv4/ip_sockglue.c 2011-08-23 21:48:14.000000000 -0400
68181 @@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
68182 int val;
68183 int len;
68184
68185 + pax_track_stack();
68186 +
68187 if (level != SOL_IP)
68188 return -EOPNOTSUPP;
68189
68190 @@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
68191 len = min_t(unsigned int, len, opt->optlen);
68192 if (put_user(len, optlen))
68193 return -EFAULT;
68194 - if (copy_to_user(optval, opt->__data, len))
68195 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
68196 + copy_to_user(optval, opt->__data, len))
68197 return -EFAULT;
68198 return 0;
68199 }
68200 diff -urNp linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
68201 --- linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
68202 +++ linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
68203 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
68204
68205 *len = 0;
68206
68207 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
68208 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
68209 if (*octets == NULL) {
68210 if (net_ratelimit())
68211 pr_notice("OOM in bsalg (%d)\n", __LINE__);
68212 diff -urNp linux-3.0.4/net/ipv4/ping.c linux-3.0.4/net/ipv4/ping.c
68213 --- linux-3.0.4/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
68214 +++ linux-3.0.4/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
68215 @@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
68216 sk_rmem_alloc_get(sp),
68217 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68218 atomic_read(&sp->sk_refcnt), sp,
68219 - atomic_read(&sp->sk_drops), len);
68220 + atomic_read_unchecked(&sp->sk_drops), len);
68221 }
68222
68223 static int ping_seq_show(struct seq_file *seq, void *v)
68224 diff -urNp linux-3.0.4/net/ipv4/raw.c linux-3.0.4/net/ipv4/raw.c
68225 --- linux-3.0.4/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
68226 +++ linux-3.0.4/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
68227 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
68228 int raw_rcv(struct sock *sk, struct sk_buff *skb)
68229 {
68230 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
68231 - atomic_inc(&sk->sk_drops);
68232 + atomic_inc_unchecked(&sk->sk_drops);
68233 kfree_skb(skb);
68234 return NET_RX_DROP;
68235 }
68236 @@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
68237
68238 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
68239 {
68240 + struct icmp_filter filter;
68241 +
68242 if (optlen > sizeof(struct icmp_filter))
68243 optlen = sizeof(struct icmp_filter);
68244 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
68245 + if (copy_from_user(&filter, optval, optlen))
68246 return -EFAULT;
68247 + raw_sk(sk)->filter = filter;
68248 return 0;
68249 }
68250
68251 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
68252 {
68253 int len, ret = -EFAULT;
68254 + struct icmp_filter filter;
68255
68256 if (get_user(len, optlen))
68257 goto out;
68258 @@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
68259 if (len > sizeof(struct icmp_filter))
68260 len = sizeof(struct icmp_filter);
68261 ret = -EFAULT;
68262 - if (put_user(len, optlen) ||
68263 - copy_to_user(optval, &raw_sk(sk)->filter, len))
68264 + filter = raw_sk(sk)->filter;
68265 + if (put_user(len, optlen) || len > sizeof filter ||
68266 + copy_to_user(optval, &filter, len))
68267 goto out;
68268 ret = 0;
68269 out: return ret;
68270 @@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
68271 sk_wmem_alloc_get(sp),
68272 sk_rmem_alloc_get(sp),
68273 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68274 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68275 + atomic_read(&sp->sk_refcnt),
68276 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68277 + NULL,
68278 +#else
68279 + sp,
68280 +#endif
68281 + atomic_read_unchecked(&sp->sk_drops));
68282 }
68283
68284 static int raw_seq_show(struct seq_file *seq, void *v)
68285 diff -urNp linux-3.0.4/net/ipv4/route.c linux-3.0.4/net/ipv4/route.c
68286 --- linux-3.0.4/net/ipv4/route.c 2011-08-23 21:44:40.000000000 -0400
68287 +++ linux-3.0.4/net/ipv4/route.c 2011-08-23 21:47:56.000000000 -0400
68288 @@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
68289
68290 static inline int rt_genid(struct net *net)
68291 {
68292 - return atomic_read(&net->ipv4.rt_genid);
68293 + return atomic_read_unchecked(&net->ipv4.rt_genid);
68294 }
68295
68296 #ifdef CONFIG_PROC_FS
68297 @@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
68298 unsigned char shuffle;
68299
68300 get_random_bytes(&shuffle, sizeof(shuffle));
68301 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
68302 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
68303 }
68304
68305 /*
68306 @@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
68307 error = rt->dst.error;
68308 if (peer) {
68309 inet_peer_refcheck(rt->peer);
68310 - id = atomic_read(&peer->ip_id_count) & 0xffff;
68311 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
68312 if (peer->tcp_ts_stamp) {
68313 ts = peer->tcp_ts;
68314 tsage = get_seconds() - peer->tcp_ts_stamp;
68315 diff -urNp linux-3.0.4/net/ipv4/tcp.c linux-3.0.4/net/ipv4/tcp.c
68316 --- linux-3.0.4/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
68317 +++ linux-3.0.4/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
68318 @@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
68319 int val;
68320 int err = 0;
68321
68322 + pax_track_stack();
68323 +
68324 /* These are data/string values, all the others are ints */
68325 switch (optname) {
68326 case TCP_CONGESTION: {
68327 @@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
68328 struct tcp_sock *tp = tcp_sk(sk);
68329 int val, len;
68330
68331 + pax_track_stack();
68332 +
68333 if (get_user(len, optlen))
68334 return -EFAULT;
68335
68336 diff -urNp linux-3.0.4/net/ipv4/tcp_ipv4.c linux-3.0.4/net/ipv4/tcp_ipv4.c
68337 --- linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-08-23 21:44:40.000000000 -0400
68338 +++ linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
68339 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
68340 int sysctl_tcp_low_latency __read_mostly;
68341 EXPORT_SYMBOL(sysctl_tcp_low_latency);
68342
68343 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68344 +extern int grsec_enable_blackhole;
68345 +#endif
68346
68347 #ifdef CONFIG_TCP_MD5SIG
68348 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
68349 @@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
68350 return 0;
68351
68352 reset:
68353 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68354 + if (!grsec_enable_blackhole)
68355 +#endif
68356 tcp_v4_send_reset(rsk, skb);
68357 discard:
68358 kfree_skb(skb);
68359 @@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
68360 TCP_SKB_CB(skb)->sacked = 0;
68361
68362 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68363 - if (!sk)
68364 + if (!sk) {
68365 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68366 + ret = 1;
68367 +#endif
68368 goto no_tcp_socket;
68369 -
68370 + }
68371 process:
68372 - if (sk->sk_state == TCP_TIME_WAIT)
68373 + if (sk->sk_state == TCP_TIME_WAIT) {
68374 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68375 + ret = 2;
68376 +#endif
68377 goto do_time_wait;
68378 + }
68379
68380 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
68381 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
68382 @@ -1724,6 +1737,10 @@ no_tcp_socket:
68383 bad_packet:
68384 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68385 } else {
68386 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68387 + if (!grsec_enable_blackhole || (ret == 1 &&
68388 + (skb->dev->flags & IFF_LOOPBACK)))
68389 +#endif
68390 tcp_v4_send_reset(NULL, skb);
68391 }
68392
68393 @@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
68394 0, /* non standard timer */
68395 0, /* open_requests have no inode */
68396 atomic_read(&sk->sk_refcnt),
68397 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68398 + NULL,
68399 +#else
68400 req,
68401 +#endif
68402 len);
68403 }
68404
68405 @@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
68406 sock_i_uid(sk),
68407 icsk->icsk_probes_out,
68408 sock_i_ino(sk),
68409 - atomic_read(&sk->sk_refcnt), sk,
68410 + atomic_read(&sk->sk_refcnt),
68411 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68412 + NULL,
68413 +#else
68414 + sk,
68415 +#endif
68416 jiffies_to_clock_t(icsk->icsk_rto),
68417 jiffies_to_clock_t(icsk->icsk_ack.ato),
68418 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
68419 @@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
68420 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
68421 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
68422 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68423 - atomic_read(&tw->tw_refcnt), tw, len);
68424 + atomic_read(&tw->tw_refcnt),
68425 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68426 + NULL,
68427 +#else
68428 + tw,
68429 +#endif
68430 + len);
68431 }
68432
68433 #define TMPSZ 150
68434 diff -urNp linux-3.0.4/net/ipv4/tcp_minisocks.c linux-3.0.4/net/ipv4/tcp_minisocks.c
68435 --- linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
68436 +++ linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
68437 @@ -27,6 +27,10 @@
68438 #include <net/inet_common.h>
68439 #include <net/xfrm.h>
68440
68441 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68442 +extern int grsec_enable_blackhole;
68443 +#endif
68444 +
68445 int sysctl_tcp_syncookies __read_mostly = 1;
68446 EXPORT_SYMBOL(sysctl_tcp_syncookies);
68447
68448 @@ -745,6 +749,10 @@ listen_overflow:
68449
68450 embryonic_reset:
68451 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
68452 +
68453 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68454 + if (!grsec_enable_blackhole)
68455 +#endif
68456 if (!(flg & TCP_FLAG_RST))
68457 req->rsk_ops->send_reset(sk, skb);
68458
68459 diff -urNp linux-3.0.4/net/ipv4/tcp_output.c linux-3.0.4/net/ipv4/tcp_output.c
68460 --- linux-3.0.4/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
68461 +++ linux-3.0.4/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
68462 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
68463 int mss;
68464 int s_data_desired = 0;
68465
68466 + pax_track_stack();
68467 +
68468 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
68469 s_data_desired = cvp->s_data_desired;
68470 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
68471 diff -urNp linux-3.0.4/net/ipv4/tcp_probe.c linux-3.0.4/net/ipv4/tcp_probe.c
68472 --- linux-3.0.4/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
68473 +++ linux-3.0.4/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
68474 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
68475 if (cnt + width >= len)
68476 break;
68477
68478 - if (copy_to_user(buf + cnt, tbuf, width))
68479 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
68480 return -EFAULT;
68481 cnt += width;
68482 }
68483 diff -urNp linux-3.0.4/net/ipv4/tcp_timer.c linux-3.0.4/net/ipv4/tcp_timer.c
68484 --- linux-3.0.4/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
68485 +++ linux-3.0.4/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
68486 @@ -22,6 +22,10 @@
68487 #include <linux/gfp.h>
68488 #include <net/tcp.h>
68489
68490 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68491 +extern int grsec_lastack_retries;
68492 +#endif
68493 +
68494 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
68495 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
68496 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
68497 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
68498 }
68499 }
68500
68501 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68502 + if ((sk->sk_state == TCP_LAST_ACK) &&
68503 + (grsec_lastack_retries > 0) &&
68504 + (grsec_lastack_retries < retry_until))
68505 + retry_until = grsec_lastack_retries;
68506 +#endif
68507 +
68508 if (retransmits_timed_out(sk, retry_until,
68509 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
68510 /* Has it gone just too far? */
68511 diff -urNp linux-3.0.4/net/ipv4/udp.c linux-3.0.4/net/ipv4/udp.c
68512 --- linux-3.0.4/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
68513 +++ linux-3.0.4/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
68514 @@ -86,6 +86,7 @@
68515 #include <linux/types.h>
68516 #include <linux/fcntl.h>
68517 #include <linux/module.h>
68518 +#include <linux/security.h>
68519 #include <linux/socket.h>
68520 #include <linux/sockios.h>
68521 #include <linux/igmp.h>
68522 @@ -107,6 +108,10 @@
68523 #include <net/xfrm.h>
68524 #include "udp_impl.h"
68525
68526 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68527 +extern int grsec_enable_blackhole;
68528 +#endif
68529 +
68530 struct udp_table udp_table __read_mostly;
68531 EXPORT_SYMBOL(udp_table);
68532
68533 @@ -564,6 +569,9 @@ found:
68534 return s;
68535 }
68536
68537 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
68538 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
68539 +
68540 /*
68541 * This routine is called by the ICMP module when it gets some
68542 * sort of error condition. If err < 0 then the socket should
68543 @@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
68544 dport = usin->sin_port;
68545 if (dport == 0)
68546 return -EINVAL;
68547 +
68548 + err = gr_search_udp_sendmsg(sk, usin);
68549 + if (err)
68550 + return err;
68551 } else {
68552 if (sk->sk_state != TCP_ESTABLISHED)
68553 return -EDESTADDRREQ;
68554 +
68555 + err = gr_search_udp_sendmsg(sk, NULL);
68556 + if (err)
68557 + return err;
68558 +
68559 daddr = inet->inet_daddr;
68560 dport = inet->inet_dport;
68561 /* Open fast path for connected socket.
68562 @@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
68563 udp_lib_checksum_complete(skb)) {
68564 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
68565 IS_UDPLITE(sk));
68566 - atomic_inc(&sk->sk_drops);
68567 + atomic_inc_unchecked(&sk->sk_drops);
68568 __skb_unlink(skb, rcvq);
68569 __skb_queue_tail(&list_kill, skb);
68570 }
68571 @@ -1184,6 +1201,10 @@ try_again:
68572 if (!skb)
68573 goto out;
68574
68575 + err = gr_search_udp_recvmsg(sk, skb);
68576 + if (err)
68577 + goto out_free;
68578 +
68579 ulen = skb->len - sizeof(struct udphdr);
68580 if (len > ulen)
68581 len = ulen;
68582 @@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
68583
68584 drop:
68585 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
68586 - atomic_inc(&sk->sk_drops);
68587 + atomic_inc_unchecked(&sk->sk_drops);
68588 kfree_skb(skb);
68589 return -1;
68590 }
68591 @@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
68592 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
68593
68594 if (!skb1) {
68595 - atomic_inc(&sk->sk_drops);
68596 + atomic_inc_unchecked(&sk->sk_drops);
68597 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
68598 IS_UDPLITE(sk));
68599 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
68600 @@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
68601 goto csum_error;
68602
68603 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
68604 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68605 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68606 +#endif
68607 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
68608
68609 /*
68610 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
68611 sk_wmem_alloc_get(sp),
68612 sk_rmem_alloc_get(sp),
68613 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68614 - atomic_read(&sp->sk_refcnt), sp,
68615 - atomic_read(&sp->sk_drops), len);
68616 + atomic_read(&sp->sk_refcnt),
68617 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68618 + NULL,
68619 +#else
68620 + sp,
68621 +#endif
68622 + atomic_read_unchecked(&sp->sk_drops), len);
68623 }
68624
68625 int udp4_seq_show(struct seq_file *seq, void *v)
68626 diff -urNp linux-3.0.4/net/ipv6/inet6_connection_sock.c linux-3.0.4/net/ipv6/inet6_connection_sock.c
68627 --- linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
68628 +++ linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
68629 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
68630 #ifdef CONFIG_XFRM
68631 {
68632 struct rt6_info *rt = (struct rt6_info *)dst;
68633 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
68634 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
68635 }
68636 #endif
68637 }
68638 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
68639 #ifdef CONFIG_XFRM
68640 if (dst) {
68641 struct rt6_info *rt = (struct rt6_info *)dst;
68642 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
68643 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
68644 __sk_dst_reset(sk);
68645 dst = NULL;
68646 }
68647 diff -urNp linux-3.0.4/net/ipv6/ipv6_sockglue.c linux-3.0.4/net/ipv6/ipv6_sockglue.c
68648 --- linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-07-21 22:17:23.000000000 -0400
68649 +++ linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-08-23 21:48:14.000000000 -0400
68650 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
68651 int val, valbool;
68652 int retv = -ENOPROTOOPT;
68653
68654 + pax_track_stack();
68655 +
68656 if (optval == NULL)
68657 val=0;
68658 else {
68659 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
68660 int len;
68661 int val;
68662
68663 + pax_track_stack();
68664 +
68665 if (ip6_mroute_opt(optname))
68666 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
68667
68668 diff -urNp linux-3.0.4/net/ipv6/raw.c linux-3.0.4/net/ipv6/raw.c
68669 --- linux-3.0.4/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
68670 +++ linux-3.0.4/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
68671 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
68672 {
68673 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
68674 skb_checksum_complete(skb)) {
68675 - atomic_inc(&sk->sk_drops);
68676 + atomic_inc_unchecked(&sk->sk_drops);
68677 kfree_skb(skb);
68678 return NET_RX_DROP;
68679 }
68680 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68681 struct raw6_sock *rp = raw6_sk(sk);
68682
68683 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
68684 - atomic_inc(&sk->sk_drops);
68685 + atomic_inc_unchecked(&sk->sk_drops);
68686 kfree_skb(skb);
68687 return NET_RX_DROP;
68688 }
68689 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68690
68691 if (inet->hdrincl) {
68692 if (skb_checksum_complete(skb)) {
68693 - atomic_inc(&sk->sk_drops);
68694 + atomic_inc_unchecked(&sk->sk_drops);
68695 kfree_skb(skb);
68696 return NET_RX_DROP;
68697 }
68698 @@ -601,7 +601,7 @@ out:
68699 return err;
68700 }
68701
68702 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
68703 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
68704 struct flowi6 *fl6, struct dst_entry **dstp,
68705 unsigned int flags)
68706 {
68707 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
68708 u16 proto;
68709 int err;
68710
68711 + pax_track_stack();
68712 +
68713 /* Rough check on arithmetic overflow,
68714 better check is made in ip6_append_data().
68715 */
68716 @@ -909,12 +911,15 @@ do_confirm:
68717 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
68718 char __user *optval, int optlen)
68719 {
68720 + struct icmp6_filter filter;
68721 +
68722 switch (optname) {
68723 case ICMPV6_FILTER:
68724 if (optlen > sizeof(struct icmp6_filter))
68725 optlen = sizeof(struct icmp6_filter);
68726 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
68727 + if (copy_from_user(&filter, optval, optlen))
68728 return -EFAULT;
68729 + raw6_sk(sk)->filter = filter;
68730 return 0;
68731 default:
68732 return -ENOPROTOOPT;
68733 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
68734 char __user *optval, int __user *optlen)
68735 {
68736 int len;
68737 + struct icmp6_filter filter;
68738
68739 switch (optname) {
68740 case ICMPV6_FILTER:
68741 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
68742 len = sizeof(struct icmp6_filter);
68743 if (put_user(len, optlen))
68744 return -EFAULT;
68745 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
68746 + filter = raw6_sk(sk)->filter;
68747 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
68748 return -EFAULT;
68749 return 0;
68750 default:
68751 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
68752 0, 0L, 0,
68753 sock_i_uid(sp), 0,
68754 sock_i_ino(sp),
68755 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68756 + atomic_read(&sp->sk_refcnt),
68757 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68758 + NULL,
68759 +#else
68760 + sp,
68761 +#endif
68762 + atomic_read_unchecked(&sp->sk_drops));
68763 }
68764
68765 static int raw6_seq_show(struct seq_file *seq, void *v)
68766 diff -urNp linux-3.0.4/net/ipv6/tcp_ipv6.c linux-3.0.4/net/ipv6/tcp_ipv6.c
68767 --- linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-08-23 21:44:40.000000000 -0400
68768 +++ linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
68769 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
68770 }
68771 #endif
68772
68773 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68774 +extern int grsec_enable_blackhole;
68775 +#endif
68776 +
68777 static void tcp_v6_hash(struct sock *sk)
68778 {
68779 if (sk->sk_state != TCP_CLOSE) {
68780 @@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
68781 return 0;
68782
68783 reset:
68784 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68785 + if (!grsec_enable_blackhole)
68786 +#endif
68787 tcp_v6_send_reset(sk, skb);
68788 discard:
68789 if (opt_skb)
68790 @@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
68791 TCP_SKB_CB(skb)->sacked = 0;
68792
68793 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68794 - if (!sk)
68795 + if (!sk) {
68796 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68797 + ret = 1;
68798 +#endif
68799 goto no_tcp_socket;
68800 + }
68801
68802 process:
68803 - if (sk->sk_state == TCP_TIME_WAIT)
68804 + if (sk->sk_state == TCP_TIME_WAIT) {
68805 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68806 + ret = 2;
68807 +#endif
68808 goto do_time_wait;
68809 + }
68810
68811 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
68812 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
68813 @@ -1794,6 +1809,10 @@ no_tcp_socket:
68814 bad_packet:
68815 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68816 } else {
68817 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68818 + if (!grsec_enable_blackhole || (ret == 1 &&
68819 + (skb->dev->flags & IFF_LOOPBACK)))
68820 +#endif
68821 tcp_v6_send_reset(NULL, skb);
68822 }
68823
68824 @@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
68825 uid,
68826 0, /* non standard timer */
68827 0, /* open_requests have no inode */
68828 - 0, req);
68829 + 0,
68830 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68831 + NULL
68832 +#else
68833 + req
68834 +#endif
68835 + );
68836 }
68837
68838 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
68839 @@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
68840 sock_i_uid(sp),
68841 icsk->icsk_probes_out,
68842 sock_i_ino(sp),
68843 - atomic_read(&sp->sk_refcnt), sp,
68844 + atomic_read(&sp->sk_refcnt),
68845 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68846 + NULL,
68847 +#else
68848 + sp,
68849 +#endif
68850 jiffies_to_clock_t(icsk->icsk_rto),
68851 jiffies_to_clock_t(icsk->icsk_ack.ato),
68852 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
68853 @@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
68854 dest->s6_addr32[2], dest->s6_addr32[3], destp,
68855 tw->tw_substate, 0, 0,
68856 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68857 - atomic_read(&tw->tw_refcnt), tw);
68858 + atomic_read(&tw->tw_refcnt),
68859 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68860 + NULL
68861 +#else
68862 + tw
68863 +#endif
68864 + );
68865 }
68866
68867 static int tcp6_seq_show(struct seq_file *seq, void *v)
68868 diff -urNp linux-3.0.4/net/ipv6/udp.c linux-3.0.4/net/ipv6/udp.c
68869 --- linux-3.0.4/net/ipv6/udp.c 2011-08-23 21:44:40.000000000 -0400
68870 +++ linux-3.0.4/net/ipv6/udp.c 2011-08-23 21:48:14.000000000 -0400
68871 @@ -50,6 +50,10 @@
68872 #include <linux/seq_file.h>
68873 #include "udp_impl.h"
68874
68875 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68876 +extern int grsec_enable_blackhole;
68877 +#endif
68878 +
68879 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
68880 {
68881 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
68882 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
68883
68884 return 0;
68885 drop:
68886 - atomic_inc(&sk->sk_drops);
68887 + atomic_inc_unchecked(&sk->sk_drops);
68888 drop_no_sk_drops_inc:
68889 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
68890 kfree_skb(skb);
68891 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
68892 continue;
68893 }
68894 drop:
68895 - atomic_inc(&sk->sk_drops);
68896 + atomic_inc_unchecked(&sk->sk_drops);
68897 UDP6_INC_STATS_BH(sock_net(sk),
68898 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
68899 UDP6_INC_STATS_BH(sock_net(sk),
68900 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
68901 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
68902 proto == IPPROTO_UDPLITE);
68903
68904 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68905 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68906 +#endif
68907 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
68908
68909 kfree_skb(skb);
68910 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
68911 if (!sock_owned_by_user(sk))
68912 udpv6_queue_rcv_skb(sk, skb);
68913 else if (sk_add_backlog(sk, skb)) {
68914 - atomic_inc(&sk->sk_drops);
68915 + atomic_inc_unchecked(&sk->sk_drops);
68916 bh_unlock_sock(sk);
68917 sock_put(sk);
68918 goto discard;
68919 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
68920 0, 0L, 0,
68921 sock_i_uid(sp), 0,
68922 sock_i_ino(sp),
68923 - atomic_read(&sp->sk_refcnt), sp,
68924 - atomic_read(&sp->sk_drops));
68925 + atomic_read(&sp->sk_refcnt),
68926 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68927 + NULL,
68928 +#else
68929 + sp,
68930 +#endif
68931 + atomic_read_unchecked(&sp->sk_drops));
68932 }
68933
68934 int udp6_seq_show(struct seq_file *seq, void *v)
68935 diff -urNp linux-3.0.4/net/irda/ircomm/ircomm_tty.c linux-3.0.4/net/irda/ircomm/ircomm_tty.c
68936 --- linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
68937 +++ linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
68938 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
68939 add_wait_queue(&self->open_wait, &wait);
68940
68941 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
68942 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68943 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68944
68945 /* As far as I can see, we protect open_count - Jean II */
68946 spin_lock_irqsave(&self->spinlock, flags);
68947 if (!tty_hung_up_p(filp)) {
68948 extra_count = 1;
68949 - self->open_count--;
68950 + local_dec(&self->open_count);
68951 }
68952 spin_unlock_irqrestore(&self->spinlock, flags);
68953 - self->blocked_open++;
68954 + local_inc(&self->blocked_open);
68955
68956 while (1) {
68957 if (tty->termios->c_cflag & CBAUD) {
68958 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
68959 }
68960
68961 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
68962 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68963 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68964
68965 schedule();
68966 }
68967 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
68968 if (extra_count) {
68969 /* ++ is not atomic, so this should be protected - Jean II */
68970 spin_lock_irqsave(&self->spinlock, flags);
68971 - self->open_count++;
68972 + local_inc(&self->open_count);
68973 spin_unlock_irqrestore(&self->spinlock, flags);
68974 }
68975 - self->blocked_open--;
68976 + local_dec(&self->blocked_open);
68977
68978 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
68979 - __FILE__,__LINE__, tty->driver->name, self->open_count);
68980 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
68981
68982 if (!retval)
68983 self->flags |= ASYNC_NORMAL_ACTIVE;
68984 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
68985 }
68986 /* ++ is not atomic, so this should be protected - Jean II */
68987 spin_lock_irqsave(&self->spinlock, flags);
68988 - self->open_count++;
68989 + local_inc(&self->open_count);
68990
68991 tty->driver_data = self;
68992 self->tty = tty;
68993 spin_unlock_irqrestore(&self->spinlock, flags);
68994
68995 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
68996 - self->line, self->open_count);
68997 + self->line, local_read(&self->open_count));
68998
68999 /* Not really used by us, but lets do it anyway */
69000 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
69001 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
69002 return;
69003 }
69004
69005 - if ((tty->count == 1) && (self->open_count != 1)) {
69006 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
69007 /*
69008 * Uh, oh. tty->count is 1, which means that the tty
69009 * structure will be freed. state->count should always
69010 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
69011 */
69012 IRDA_DEBUG(0, "%s(), bad serial port count; "
69013 "tty->count is 1, state->count is %d\n", __func__ ,
69014 - self->open_count);
69015 - self->open_count = 1;
69016 + local_read(&self->open_count));
69017 + local_set(&self->open_count, 1);
69018 }
69019
69020 - if (--self->open_count < 0) {
69021 + if (local_dec_return(&self->open_count) < 0) {
69022 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
69023 - __func__, self->line, self->open_count);
69024 - self->open_count = 0;
69025 + __func__, self->line, local_read(&self->open_count));
69026 + local_set(&self->open_count, 0);
69027 }
69028 - if (self->open_count) {
69029 + if (local_read(&self->open_count)) {
69030 spin_unlock_irqrestore(&self->spinlock, flags);
69031
69032 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
69033 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
69034 tty->closing = 0;
69035 self->tty = NULL;
69036
69037 - if (self->blocked_open) {
69038 + if (local_read(&self->blocked_open)) {
69039 if (self->close_delay)
69040 schedule_timeout_interruptible(self->close_delay);
69041 wake_up_interruptible(&self->open_wait);
69042 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
69043 spin_lock_irqsave(&self->spinlock, flags);
69044 self->flags &= ~ASYNC_NORMAL_ACTIVE;
69045 self->tty = NULL;
69046 - self->open_count = 0;
69047 + local_set(&self->open_count, 0);
69048 spin_unlock_irqrestore(&self->spinlock, flags);
69049
69050 wake_up_interruptible(&self->open_wait);
69051 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
69052 seq_putc(m, '\n');
69053
69054 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
69055 - seq_printf(m, "Open count: %d\n", self->open_count);
69056 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
69057 seq_printf(m, "Max data size: %d\n", self->max_data_size);
69058 seq_printf(m, "Max header size: %d\n", self->max_header_size);
69059
69060 diff -urNp linux-3.0.4/net/iucv/af_iucv.c linux-3.0.4/net/iucv/af_iucv.c
69061 --- linux-3.0.4/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
69062 +++ linux-3.0.4/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
69063 @@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
69064
69065 write_lock_bh(&iucv_sk_list.lock);
69066
69067 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
69068 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69069 while (__iucv_get_sock_by_name(name)) {
69070 sprintf(name, "%08x",
69071 - atomic_inc_return(&iucv_sk_list.autobind_name));
69072 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69073 }
69074
69075 write_unlock_bh(&iucv_sk_list.lock);
69076 diff -urNp linux-3.0.4/net/key/af_key.c linux-3.0.4/net/key/af_key.c
69077 --- linux-3.0.4/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
69078 +++ linux-3.0.4/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
69079 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
69080 struct xfrm_migrate m[XFRM_MAX_DEPTH];
69081 struct xfrm_kmaddress k;
69082
69083 + pax_track_stack();
69084 +
69085 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
69086 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
69087 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
69088 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
69089 static u32 get_acqseq(void)
69090 {
69091 u32 res;
69092 - static atomic_t acqseq;
69093 + static atomic_unchecked_t acqseq;
69094
69095 do {
69096 - res = atomic_inc_return(&acqseq);
69097 + res = atomic_inc_return_unchecked(&acqseq);
69098 } while (!res);
69099 return res;
69100 }
69101 diff -urNp linux-3.0.4/net/lapb/lapb_iface.c linux-3.0.4/net/lapb/lapb_iface.c
69102 --- linux-3.0.4/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
69103 +++ linux-3.0.4/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
69104 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
69105 goto out;
69106
69107 lapb->dev = dev;
69108 - lapb->callbacks = *callbacks;
69109 + lapb->callbacks = callbacks;
69110
69111 __lapb_insert_cb(lapb);
69112
69113 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
69114
69115 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
69116 {
69117 - if (lapb->callbacks.connect_confirmation)
69118 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
69119 + if (lapb->callbacks->connect_confirmation)
69120 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
69121 }
69122
69123 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
69124 {
69125 - if (lapb->callbacks.connect_indication)
69126 - lapb->callbacks.connect_indication(lapb->dev, reason);
69127 + if (lapb->callbacks->connect_indication)
69128 + lapb->callbacks->connect_indication(lapb->dev, reason);
69129 }
69130
69131 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
69132 {
69133 - if (lapb->callbacks.disconnect_confirmation)
69134 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
69135 + if (lapb->callbacks->disconnect_confirmation)
69136 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
69137 }
69138
69139 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
69140 {
69141 - if (lapb->callbacks.disconnect_indication)
69142 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
69143 + if (lapb->callbacks->disconnect_indication)
69144 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
69145 }
69146
69147 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
69148 {
69149 - if (lapb->callbacks.data_indication)
69150 - return lapb->callbacks.data_indication(lapb->dev, skb);
69151 + if (lapb->callbacks->data_indication)
69152 + return lapb->callbacks->data_indication(lapb->dev, skb);
69153
69154 kfree_skb(skb);
69155 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
69156 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
69157 {
69158 int used = 0;
69159
69160 - if (lapb->callbacks.data_transmit) {
69161 - lapb->callbacks.data_transmit(lapb->dev, skb);
69162 + if (lapb->callbacks->data_transmit) {
69163 + lapb->callbacks->data_transmit(lapb->dev, skb);
69164 used = 1;
69165 }
69166
69167 diff -urNp linux-3.0.4/net/mac80211/debugfs_sta.c linux-3.0.4/net/mac80211/debugfs_sta.c
69168 --- linux-3.0.4/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
69169 +++ linux-3.0.4/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
69170 @@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
69171 struct tid_ampdu_rx *tid_rx;
69172 struct tid_ampdu_tx *tid_tx;
69173
69174 + pax_track_stack();
69175 +
69176 rcu_read_lock();
69177
69178 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
69179 @@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
69180 struct sta_info *sta = file->private_data;
69181 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
69182
69183 + pax_track_stack();
69184 +
69185 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
69186 htc->ht_supported ? "" : "not ");
69187 if (htc->ht_supported) {
69188 diff -urNp linux-3.0.4/net/mac80211/ieee80211_i.h linux-3.0.4/net/mac80211/ieee80211_i.h
69189 --- linux-3.0.4/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
69190 +++ linux-3.0.4/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
69191 @@ -27,6 +27,7 @@
69192 #include <net/ieee80211_radiotap.h>
69193 #include <net/cfg80211.h>
69194 #include <net/mac80211.h>
69195 +#include <asm/local.h>
69196 #include "key.h"
69197 #include "sta_info.h"
69198
69199 @@ -721,7 +722,7 @@ struct ieee80211_local {
69200 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
69201 spinlock_t queue_stop_reason_lock;
69202
69203 - int open_count;
69204 + local_t open_count;
69205 int monitors, cooked_mntrs;
69206 /* number of interfaces with corresponding FIF_ flags */
69207 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
69208 diff -urNp linux-3.0.4/net/mac80211/iface.c linux-3.0.4/net/mac80211/iface.c
69209 --- linux-3.0.4/net/mac80211/iface.c 2011-08-23 21:44:40.000000000 -0400
69210 +++ linux-3.0.4/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
69211 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
69212 break;
69213 }
69214
69215 - if (local->open_count == 0) {
69216 + if (local_read(&local->open_count) == 0) {
69217 res = drv_start(local);
69218 if (res)
69219 goto err_del_bss;
69220 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
69221 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
69222
69223 if (!is_valid_ether_addr(dev->dev_addr)) {
69224 - if (!local->open_count)
69225 + if (!local_read(&local->open_count))
69226 drv_stop(local);
69227 return -EADDRNOTAVAIL;
69228 }
69229 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
69230 mutex_unlock(&local->mtx);
69231
69232 if (coming_up)
69233 - local->open_count++;
69234 + local_inc(&local->open_count);
69235
69236 if (hw_reconf_flags) {
69237 ieee80211_hw_config(local, hw_reconf_flags);
69238 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
69239 err_del_interface:
69240 drv_remove_interface(local, &sdata->vif);
69241 err_stop:
69242 - if (!local->open_count)
69243 + if (!local_read(&local->open_count))
69244 drv_stop(local);
69245 err_del_bss:
69246 sdata->bss = NULL;
69247 @@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
69248 }
69249
69250 if (going_down)
69251 - local->open_count--;
69252 + local_dec(&local->open_count);
69253
69254 switch (sdata->vif.type) {
69255 case NL80211_IFTYPE_AP_VLAN:
69256 @@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
69257
69258 ieee80211_recalc_ps(local, -1);
69259
69260 - if (local->open_count == 0) {
69261 + if (local_read(&local->open_count) == 0) {
69262 if (local->ops->napi_poll)
69263 napi_disable(&local->napi);
69264 ieee80211_clear_tx_pending(local);
69265 diff -urNp linux-3.0.4/net/mac80211/main.c linux-3.0.4/net/mac80211/main.c
69266 --- linux-3.0.4/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
69267 +++ linux-3.0.4/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
69268 @@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
69269 local->hw.conf.power_level = power;
69270 }
69271
69272 - if (changed && local->open_count) {
69273 + if (changed && local_read(&local->open_count)) {
69274 ret = drv_config(local, changed);
69275 /*
69276 * Goal:
69277 diff -urNp linux-3.0.4/net/mac80211/mlme.c linux-3.0.4/net/mac80211/mlme.c
69278 --- linux-3.0.4/net/mac80211/mlme.c 2011-08-23 21:44:40.000000000 -0400
69279 +++ linux-3.0.4/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
69280 @@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
69281 bool have_higher_than_11mbit = false;
69282 u16 ap_ht_cap_flags;
69283
69284 + pax_track_stack();
69285 +
69286 /* AssocResp and ReassocResp have identical structure */
69287
69288 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
69289 diff -urNp linux-3.0.4/net/mac80211/pm.c linux-3.0.4/net/mac80211/pm.c
69290 --- linux-3.0.4/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
69291 +++ linux-3.0.4/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
69292 @@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
69293 cancel_work_sync(&local->dynamic_ps_enable_work);
69294 del_timer_sync(&local->dynamic_ps_timer);
69295
69296 - local->wowlan = wowlan && local->open_count;
69297 + local->wowlan = wowlan && local_read(&local->open_count);
69298 if (local->wowlan) {
69299 int err = drv_suspend(local, wowlan);
69300 if (err) {
69301 @@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
69302 }
69303
69304 /* stop hardware - this must stop RX */
69305 - if (local->open_count)
69306 + if (local_read(&local->open_count))
69307 ieee80211_stop_device(local);
69308
69309 suspend:
69310 diff -urNp linux-3.0.4/net/mac80211/rate.c linux-3.0.4/net/mac80211/rate.c
69311 --- linux-3.0.4/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
69312 +++ linux-3.0.4/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
69313 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
69314
69315 ASSERT_RTNL();
69316
69317 - if (local->open_count)
69318 + if (local_read(&local->open_count))
69319 return -EBUSY;
69320
69321 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
69322 diff -urNp linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c
69323 --- linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
69324 +++ linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
69325 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
69326
69327 spin_unlock_irqrestore(&events->lock, status);
69328
69329 - if (copy_to_user(buf, pb, p))
69330 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
69331 return -EFAULT;
69332
69333 return p;
69334 diff -urNp linux-3.0.4/net/mac80211/util.c linux-3.0.4/net/mac80211/util.c
69335 --- linux-3.0.4/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
69336 +++ linux-3.0.4/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
69337 @@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
69338 #endif
69339
69340 /* restart hardware */
69341 - if (local->open_count) {
69342 + if (local_read(&local->open_count)) {
69343 /*
69344 * Upon resume hardware can sometimes be goofy due to
69345 * various platform / driver / bus issues, so restarting
69346 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c
69347 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
69348 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
69349 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
69350 /* Increase the refcnt counter of the dest */
69351 atomic_inc(&dest->refcnt);
69352
69353 - conn_flags = atomic_read(&dest->conn_flags);
69354 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
69355 if (cp->protocol != IPPROTO_UDP)
69356 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
69357 /* Bind with the destination and its corresponding transmitter */
69358 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
69359 atomic_set(&cp->refcnt, 1);
69360
69361 atomic_set(&cp->n_control, 0);
69362 - atomic_set(&cp->in_pkts, 0);
69363 + atomic_set_unchecked(&cp->in_pkts, 0);
69364
69365 atomic_inc(&ipvs->conn_count);
69366 if (flags & IP_VS_CONN_F_NO_CPORT)
69367 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
69368
69369 /* Don't drop the entry if its number of incoming packets is not
69370 located in [0, 8] */
69371 - i = atomic_read(&cp->in_pkts);
69372 + i = atomic_read_unchecked(&cp->in_pkts);
69373 if (i > 8 || i < 0) return 0;
69374
69375 if (!todrop_rate[i]) return 0;
69376 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c
69377 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
69378 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
69379 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
69380 ret = cp->packet_xmit(skb, cp, pd->pp);
69381 /* do not touch skb anymore */
69382
69383 - atomic_inc(&cp->in_pkts);
69384 + atomic_inc_unchecked(&cp->in_pkts);
69385 ip_vs_conn_put(cp);
69386 return ret;
69387 }
69388 @@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
69389 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
69390 pkts = sysctl_sync_threshold(ipvs);
69391 else
69392 - pkts = atomic_add_return(1, &cp->in_pkts);
69393 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
69394
69395 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
69396 cp->protocol == IPPROTO_SCTP) {
69397 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c
69398 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:44:40.000000000 -0400
69399 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
69400 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
69401 ip_vs_rs_hash(ipvs, dest);
69402 write_unlock_bh(&ipvs->rs_lock);
69403 }
69404 - atomic_set(&dest->conn_flags, conn_flags);
69405 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
69406
69407 /* bind the service */
69408 if (!dest->svc) {
69409 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
69410 " %-7s %-6d %-10d %-10d\n",
69411 &dest->addr.in6,
69412 ntohs(dest->port),
69413 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69414 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69415 atomic_read(&dest->weight),
69416 atomic_read(&dest->activeconns),
69417 atomic_read(&dest->inactconns));
69418 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
69419 "%-7s %-6d %-10d %-10d\n",
69420 ntohl(dest->addr.ip),
69421 ntohs(dest->port),
69422 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69423 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69424 atomic_read(&dest->weight),
69425 atomic_read(&dest->activeconns),
69426 atomic_read(&dest->inactconns));
69427 @@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
69428 struct ip_vs_dest_user *udest_compat;
69429 struct ip_vs_dest_user_kern udest;
69430
69431 + pax_track_stack();
69432 +
69433 if (!capable(CAP_NET_ADMIN))
69434 return -EPERM;
69435
69436 @@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
69437
69438 entry.addr = dest->addr.ip;
69439 entry.port = dest->port;
69440 - entry.conn_flags = atomic_read(&dest->conn_flags);
69441 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
69442 entry.weight = atomic_read(&dest->weight);
69443 entry.u_threshold = dest->u_threshold;
69444 entry.l_threshold = dest->l_threshold;
69445 @@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
69446 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
69447
69448 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
69449 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69450 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69451 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
69452 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
69453 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
69454 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c
69455 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
69456 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
69457 @@ -648,7 +648,7 @@ control:
69458 * i.e only increment in_pkts for Templates.
69459 */
69460 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
69461 - int pkts = atomic_add_return(1, &cp->in_pkts);
69462 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
69463
69464 if (pkts % sysctl_sync_period(ipvs) != 1)
69465 return;
69466 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
69467
69468 if (opt)
69469 memcpy(&cp->in_seq, opt, sizeof(*opt));
69470 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
69471 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
69472 cp->state = state;
69473 cp->old_state = cp->state;
69474 /*
69475 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c
69476 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
69477 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
69478 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
69479 else
69480 rc = NF_ACCEPT;
69481 /* do not touch skb anymore */
69482 - atomic_inc(&cp->in_pkts);
69483 + atomic_inc_unchecked(&cp->in_pkts);
69484 goto out;
69485 }
69486
69487 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
69488 else
69489 rc = NF_ACCEPT;
69490 /* do not touch skb anymore */
69491 - atomic_inc(&cp->in_pkts);
69492 + atomic_inc_unchecked(&cp->in_pkts);
69493 goto out;
69494 }
69495
69496 diff -urNp linux-3.0.4/net/netfilter/Kconfig linux-3.0.4/net/netfilter/Kconfig
69497 --- linux-3.0.4/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
69498 +++ linux-3.0.4/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
69499 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
69500
69501 To compile it as a module, choose M here. If unsure, say N.
69502
69503 +config NETFILTER_XT_MATCH_GRADM
69504 + tristate '"gradm" match support'
69505 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
69506 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
69507 + ---help---
69508 + The gradm match allows to match on grsecurity RBAC being enabled.
69509 + It is useful when iptables rules are applied early on bootup to
69510 + prevent connections to the machine (except from a trusted host)
69511 + while the RBAC system is disabled.
69512 +
69513 config NETFILTER_XT_MATCH_HASHLIMIT
69514 tristate '"hashlimit" match support'
69515 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
69516 diff -urNp linux-3.0.4/net/netfilter/Makefile linux-3.0.4/net/netfilter/Makefile
69517 --- linux-3.0.4/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
69518 +++ linux-3.0.4/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
69519 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
69520 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
69521 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
69522 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
69523 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
69524 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
69525 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
69526 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
69527 diff -urNp linux-3.0.4/net/netfilter/nfnetlink_log.c linux-3.0.4/net/netfilter/nfnetlink_log.c
69528 --- linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
69529 +++ linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
69530 @@ -70,7 +70,7 @@ struct nfulnl_instance {
69531 };
69532
69533 static DEFINE_SPINLOCK(instances_lock);
69534 -static atomic_t global_seq;
69535 +static atomic_unchecked_t global_seq;
69536
69537 #define INSTANCE_BUCKETS 16
69538 static struct hlist_head instance_table[INSTANCE_BUCKETS];
69539 @@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
69540 /* global sequence number */
69541 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
69542 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
69543 - htonl(atomic_inc_return(&global_seq)));
69544 + htonl(atomic_inc_return_unchecked(&global_seq)));
69545
69546 if (data_len) {
69547 struct nlattr *nla;
69548 diff -urNp linux-3.0.4/net/netfilter/nfnetlink_queue.c linux-3.0.4/net/netfilter/nfnetlink_queue.c
69549 --- linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
69550 +++ linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
69551 @@ -58,7 +58,7 @@ struct nfqnl_instance {
69552 */
69553 spinlock_t lock;
69554 unsigned int queue_total;
69555 - atomic_t id_sequence; /* 'sequence' of pkt ids */
69556 + atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
69557 struct list_head queue_list; /* packets in queue */
69558 };
69559
69560 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
69561 nfmsg->version = NFNETLINK_V0;
69562 nfmsg->res_id = htons(queue->queue_num);
69563
69564 - entry->id = atomic_inc_return(&queue->id_sequence);
69565 + entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
69566 pmsg.packet_id = htonl(entry->id);
69567 pmsg.hw_protocol = entskb->protocol;
69568 pmsg.hook = entry->hook;
69569 @@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
69570 inst->peer_pid, inst->queue_total,
69571 inst->copy_mode, inst->copy_range,
69572 inst->queue_dropped, inst->queue_user_dropped,
69573 - atomic_read(&inst->id_sequence), 1);
69574 + atomic_read_unchecked(&inst->id_sequence), 1);
69575 }
69576
69577 static const struct seq_operations nfqnl_seq_ops = {
69578 diff -urNp linux-3.0.4/net/netfilter/xt_gradm.c linux-3.0.4/net/netfilter/xt_gradm.c
69579 --- linux-3.0.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
69580 +++ linux-3.0.4/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
69581 @@ -0,0 +1,51 @@
69582 +/*
69583 + * gradm match for netfilter
69584 + * Copyright © Zbigniew Krzystolik, 2010
69585 + *
69586 + * This program is free software; you can redistribute it and/or modify
69587 + * it under the terms of the GNU General Public License; either version
69588 + * 2 or 3 as published by the Free Software Foundation.
69589 + */
69590 +#include <linux/module.h>
69591 +#include <linux/moduleparam.h>
69592 +#include <linux/skbuff.h>
69593 +#include <linux/netfilter/x_tables.h>
69594 +#include <linux/grsecurity.h>
69595 +#include <linux/netfilter/xt_gradm.h>
69596 +
69597 +static bool
69598 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
69599 +{
69600 + const struct xt_gradm_mtinfo *info = par->matchinfo;
69601 + bool retval = false;
69602 + if (gr_acl_is_enabled())
69603 + retval = true;
69604 + return retval ^ info->invflags;
69605 +}
69606 +
69607 +static struct xt_match gradm_mt_reg __read_mostly = {
69608 + .name = "gradm",
69609 + .revision = 0,
69610 + .family = NFPROTO_UNSPEC,
69611 + .match = gradm_mt,
69612 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
69613 + .me = THIS_MODULE,
69614 +};
69615 +
69616 +static int __init gradm_mt_init(void)
69617 +{
69618 + return xt_register_match(&gradm_mt_reg);
69619 +}
69620 +
69621 +static void __exit gradm_mt_exit(void)
69622 +{
69623 + xt_unregister_match(&gradm_mt_reg);
69624 +}
69625 +
69626 +module_init(gradm_mt_init);
69627 +module_exit(gradm_mt_exit);
69628 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
69629 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
69630 +MODULE_LICENSE("GPL");
69631 +MODULE_ALIAS("ipt_gradm");
69632 +MODULE_ALIAS("ip6t_gradm");
69633 diff -urNp linux-3.0.4/net/netfilter/xt_statistic.c linux-3.0.4/net/netfilter/xt_statistic.c
69634 --- linux-3.0.4/net/netfilter/xt_statistic.c 2011-07-21 22:17:23.000000000 -0400
69635 +++ linux-3.0.4/net/netfilter/xt_statistic.c 2011-08-23 21:47:56.000000000 -0400
69636 @@ -18,7 +18,7 @@
69637 #include <linux/netfilter/x_tables.h>
69638
69639 struct xt_statistic_priv {
69640 - atomic_t count;
69641 + atomic_unchecked_t count;
69642 } ____cacheline_aligned_in_smp;
69643
69644 MODULE_LICENSE("GPL");
69645 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
69646 break;
69647 case XT_STATISTIC_MODE_NTH:
69648 do {
69649 - oval = atomic_read(&info->master->count);
69650 + oval = atomic_read_unchecked(&info->master->count);
69651 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
69652 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
69653 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
69654 if (nval == 0)
69655 ret = !ret;
69656 break;
69657 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
69658 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
69659 if (info->master == NULL)
69660 return -ENOMEM;
69661 - atomic_set(&info->master->count, info->u.nth.count);
69662 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
69663
69664 return 0;
69665 }
69666 diff -urNp linux-3.0.4/net/netlink/af_netlink.c linux-3.0.4/net/netlink/af_netlink.c
69667 --- linux-3.0.4/net/netlink/af_netlink.c 2011-07-21 22:17:23.000000000 -0400
69668 +++ linux-3.0.4/net/netlink/af_netlink.c 2011-08-23 21:47:56.000000000 -0400
69669 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
69670 sk->sk_error_report(sk);
69671 }
69672 }
69673 - atomic_inc(&sk->sk_drops);
69674 + atomic_inc_unchecked(&sk->sk_drops);
69675 }
69676
69677 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
69678 @@ -1994,7 +1994,7 @@ static int netlink_seq_show(struct seq_f
69679 sk_wmem_alloc_get(s),
69680 nlk->cb,
69681 atomic_read(&s->sk_refcnt),
69682 - atomic_read(&s->sk_drops),
69683 + atomic_read_unchecked(&s->sk_drops),
69684 sock_i_ino(s)
69685 );
69686
69687 diff -urNp linux-3.0.4/net/netrom/af_netrom.c linux-3.0.4/net/netrom/af_netrom.c
69688 --- linux-3.0.4/net/netrom/af_netrom.c 2011-07-21 22:17:23.000000000 -0400
69689 +++ linux-3.0.4/net/netrom/af_netrom.c 2011-08-23 21:48:14.000000000 -0400
69690 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *soc
69691 struct sock *sk = sock->sk;
69692 struct nr_sock *nr = nr_sk(sk);
69693
69694 + memset(sax, 0, sizeof(*sax));
69695 lock_sock(sk);
69696 if (peer != 0) {
69697 if (sk->sk_state != TCP_ESTABLISHED) {
69698 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *soc
69699 *uaddr_len = sizeof(struct full_sockaddr_ax25);
69700 } else {
69701 sax->fsa_ax25.sax25_family = AF_NETROM;
69702 - sax->fsa_ax25.sax25_ndigis = 0;
69703 sax->fsa_ax25.sax25_call = nr->source_addr;
69704 *uaddr_len = sizeof(struct sockaddr_ax25);
69705 }
69706 diff -urNp linux-3.0.4/net/packet/af_packet.c linux-3.0.4/net/packet/af_packet.c
69707 --- linux-3.0.4/net/packet/af_packet.c 2011-07-21 22:17:23.000000000 -0400
69708 +++ linux-3.0.4/net/packet/af_packet.c 2011-08-23 21:47:56.000000000 -0400
69709 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
69710
69711 spin_lock(&sk->sk_receive_queue.lock);
69712 po->stats.tp_packets++;
69713 - skb->dropcount = atomic_read(&sk->sk_drops);
69714 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
69715 __skb_queue_tail(&sk->sk_receive_queue, skb);
69716 spin_unlock(&sk->sk_receive_queue.lock);
69717 sk->sk_data_ready(sk, skb->len);
69718 return 0;
69719
69720 drop_n_acct:
69721 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
69722 + po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
69723
69724 drop_n_restore:
69725 if (skb_head != skb->data && skb_shared(skb)) {
69726 @@ -2168,7 +2168,7 @@ static int packet_getsockopt(struct sock
69727 case PACKET_HDRLEN:
69728 if (len > sizeof(int))
69729 len = sizeof(int);
69730 - if (copy_from_user(&val, optval, len))
69731 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
69732 return -EFAULT;
69733 switch (val) {
69734 case TPACKET_V1:
69735 @@ -2206,7 +2206,7 @@ static int packet_getsockopt(struct sock
69736
69737 if (put_user(len, optlen))
69738 return -EFAULT;
69739 - if (copy_to_user(optval, data, len))
69740 + if (len > sizeof(st) || copy_to_user(optval, data, len))
69741 return -EFAULT;
69742 return 0;
69743 }
69744 diff -urNp linux-3.0.4/net/phonet/af_phonet.c linux-3.0.4/net/phonet/af_phonet.c
69745 --- linux-3.0.4/net/phonet/af_phonet.c 2011-07-21 22:17:23.000000000 -0400
69746 +++ linux-3.0.4/net/phonet/af_phonet.c 2011-08-23 21:48:14.000000000 -0400
69747 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
69748 {
69749 struct phonet_protocol *pp;
69750
69751 - if (protocol >= PHONET_NPROTO)
69752 + if (protocol < 0 || protocol >= PHONET_NPROTO)
69753 return NULL;
69754
69755 rcu_read_lock();
69756 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
69757 {
69758 int err = 0;
69759
69760 - if (protocol >= PHONET_NPROTO)
69761 + if (protocol < 0 || protocol >= PHONET_NPROTO)
69762 return -EINVAL;
69763
69764 err = proto_register(pp->prot, 1);
69765 diff -urNp linux-3.0.4/net/phonet/pep.c linux-3.0.4/net/phonet/pep.c
69766 --- linux-3.0.4/net/phonet/pep.c 2011-07-21 22:17:23.000000000 -0400
69767 +++ linux-3.0.4/net/phonet/pep.c 2011-08-23 21:47:56.000000000 -0400
69768 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
69769
69770 case PNS_PEP_CTRL_REQ:
69771 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
69772 - atomic_inc(&sk->sk_drops);
69773 + atomic_inc_unchecked(&sk->sk_drops);
69774 break;
69775 }
69776 __skb_pull(skb, 4);
69777 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
69778 }
69779
69780 if (pn->rx_credits == 0) {
69781 - atomic_inc(&sk->sk_drops);
69782 + atomic_inc_unchecked(&sk->sk_drops);
69783 err = -ENOBUFS;
69784 break;
69785 }
69786 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
69787 }
69788
69789 if (pn->rx_credits == 0) {
69790 - atomic_inc(&sk->sk_drops);
69791 + atomic_inc_unchecked(&sk->sk_drops);
69792 err = NET_RX_DROP;
69793 break;
69794 }
69795 diff -urNp linux-3.0.4/net/phonet/socket.c linux-3.0.4/net/phonet/socket.c
69796 --- linux-3.0.4/net/phonet/socket.c 2011-07-21 22:17:23.000000000 -0400
69797 +++ linux-3.0.4/net/phonet/socket.c 2011-08-23 21:48:14.000000000 -0400
69798 @@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_f
69799 pn->resource, sk->sk_state,
69800 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
69801 sock_i_uid(sk), sock_i_ino(sk),
69802 - atomic_read(&sk->sk_refcnt), sk,
69803 - atomic_read(&sk->sk_drops), &len);
69804 + atomic_read(&sk->sk_refcnt),
69805 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69806 + NULL,
69807 +#else
69808 + sk,
69809 +#endif
69810 + atomic_read_unchecked(&sk->sk_drops), &len);
69811 }
69812 seq_printf(seq, "%*s\n", 127 - len, "");
69813 return 0;
69814 diff -urNp linux-3.0.4/net/rds/cong.c linux-3.0.4/net/rds/cong.c
69815 --- linux-3.0.4/net/rds/cong.c 2011-07-21 22:17:23.000000000 -0400
69816 +++ linux-3.0.4/net/rds/cong.c 2011-08-23 21:47:56.000000000 -0400
69817 @@ -77,7 +77,7 @@
69818 * finds that the saved generation number is smaller than the global generation
69819 * number, it wakes up the process.
69820 */
69821 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
69822 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
69823
69824 /*
69825 * Congestion monitoring
69826 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
69827 rdsdebug("waking map %p for %pI4\n",
69828 map, &map->m_addr);
69829 rds_stats_inc(s_cong_update_received);
69830 - atomic_inc(&rds_cong_generation);
69831 + atomic_inc_unchecked(&rds_cong_generation);
69832 if (waitqueue_active(&map->m_waitq))
69833 wake_up(&map->m_waitq);
69834 if (waitqueue_active(&rds_poll_waitq))
69835 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
69836
69837 int rds_cong_updated_since(unsigned long *recent)
69838 {
69839 - unsigned long gen = atomic_read(&rds_cong_generation);
69840 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
69841
69842 if (likely(*recent == gen))
69843 return 0;
69844 diff -urNp linux-3.0.4/net/rds/ib_cm.c linux-3.0.4/net/rds/ib_cm.c
69845 --- linux-3.0.4/net/rds/ib_cm.c 2011-07-21 22:17:23.000000000 -0400
69846 +++ linux-3.0.4/net/rds/ib_cm.c 2011-08-23 21:47:56.000000000 -0400
69847 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
69848 /* Clear the ACK state */
69849 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
69850 #ifdef KERNEL_HAS_ATOMIC64
69851 - atomic64_set(&ic->i_ack_next, 0);
69852 + atomic64_set_unchecked(&ic->i_ack_next, 0);
69853 #else
69854 ic->i_ack_next = 0;
69855 #endif
69856 diff -urNp linux-3.0.4/net/rds/ib.h linux-3.0.4/net/rds/ib.h
69857 --- linux-3.0.4/net/rds/ib.h 2011-07-21 22:17:23.000000000 -0400
69858 +++ linux-3.0.4/net/rds/ib.h 2011-08-23 21:47:56.000000000 -0400
69859 @@ -127,7 +127,7 @@ struct rds_ib_connection {
69860 /* sending acks */
69861 unsigned long i_ack_flags;
69862 #ifdef KERNEL_HAS_ATOMIC64
69863 - atomic64_t i_ack_next; /* next ACK to send */
69864 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
69865 #else
69866 spinlock_t i_ack_lock; /* protect i_ack_next */
69867 u64 i_ack_next; /* next ACK to send */
69868 diff -urNp linux-3.0.4/net/rds/ib_recv.c linux-3.0.4/net/rds/ib_recv.c
69869 --- linux-3.0.4/net/rds/ib_recv.c 2011-07-21 22:17:23.000000000 -0400
69870 +++ linux-3.0.4/net/rds/ib_recv.c 2011-08-23 21:47:56.000000000 -0400
69871 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
69872 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
69873 int ack_required)
69874 {
69875 - atomic64_set(&ic->i_ack_next, seq);
69876 + atomic64_set_unchecked(&ic->i_ack_next, seq);
69877 if (ack_required) {
69878 smp_mb__before_clear_bit();
69879 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
69880 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
69881 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
69882 smp_mb__after_clear_bit();
69883
69884 - return atomic64_read(&ic->i_ack_next);
69885 + return atomic64_read_unchecked(&ic->i_ack_next);
69886 }
69887 #endif
69888
69889 diff -urNp linux-3.0.4/net/rds/iw_cm.c linux-3.0.4/net/rds/iw_cm.c
69890 --- linux-3.0.4/net/rds/iw_cm.c 2011-07-21 22:17:23.000000000 -0400
69891 +++ linux-3.0.4/net/rds/iw_cm.c 2011-08-23 21:47:56.000000000 -0400
69892 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
69893 /* Clear the ACK state */
69894 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
69895 #ifdef KERNEL_HAS_ATOMIC64
69896 - atomic64_set(&ic->i_ack_next, 0);
69897 + atomic64_set_unchecked(&ic->i_ack_next, 0);
69898 #else
69899 ic->i_ack_next = 0;
69900 #endif
69901 diff -urNp linux-3.0.4/net/rds/iw.h linux-3.0.4/net/rds/iw.h
69902 --- linux-3.0.4/net/rds/iw.h 2011-07-21 22:17:23.000000000 -0400
69903 +++ linux-3.0.4/net/rds/iw.h 2011-08-23 21:47:56.000000000 -0400
69904 @@ -133,7 +133,7 @@ struct rds_iw_connection {
69905 /* sending acks */
69906 unsigned long i_ack_flags;
69907 #ifdef KERNEL_HAS_ATOMIC64
69908 - atomic64_t i_ack_next; /* next ACK to send */
69909 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
69910 #else
69911 spinlock_t i_ack_lock; /* protect i_ack_next */
69912 u64 i_ack_next; /* next ACK to send */
69913 diff -urNp linux-3.0.4/net/rds/iw_rdma.c linux-3.0.4/net/rds/iw_rdma.c
69914 --- linux-3.0.4/net/rds/iw_rdma.c 2011-07-21 22:17:23.000000000 -0400
69915 +++ linux-3.0.4/net/rds/iw_rdma.c 2011-08-23 21:48:14.000000000 -0400
69916 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
69917 struct rdma_cm_id *pcm_id;
69918 int rc;
69919
69920 + pax_track_stack();
69921 +
69922 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
69923 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
69924
69925 diff -urNp linux-3.0.4/net/rds/iw_recv.c linux-3.0.4/net/rds/iw_recv.c
69926 --- linux-3.0.4/net/rds/iw_recv.c 2011-07-21 22:17:23.000000000 -0400
69927 +++ linux-3.0.4/net/rds/iw_recv.c 2011-08-23 21:47:56.000000000 -0400
69928 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
69929 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
69930 int ack_required)
69931 {
69932 - atomic64_set(&ic->i_ack_next, seq);
69933 + atomic64_set_unchecked(&ic->i_ack_next, seq);
69934 if (ack_required) {
69935 smp_mb__before_clear_bit();
69936 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
69937 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
69938 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
69939 smp_mb__after_clear_bit();
69940
69941 - return atomic64_read(&ic->i_ack_next);
69942 + return atomic64_read_unchecked(&ic->i_ack_next);
69943 }
69944 #endif
69945
69946 diff -urNp linux-3.0.4/net/rxrpc/af_rxrpc.c linux-3.0.4/net/rxrpc/af_rxrpc.c
69947 --- linux-3.0.4/net/rxrpc/af_rxrpc.c 2011-07-21 22:17:23.000000000 -0400
69948 +++ linux-3.0.4/net/rxrpc/af_rxrpc.c 2011-08-23 21:47:56.000000000 -0400
69949 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
69950 __be32 rxrpc_epoch;
69951
69952 /* current debugging ID */
69953 -atomic_t rxrpc_debug_id;
69954 +atomic_unchecked_t rxrpc_debug_id;
69955
69956 /* count of skbs currently in use */
69957 atomic_t rxrpc_n_skbs;
69958 diff -urNp linux-3.0.4/net/rxrpc/ar-ack.c linux-3.0.4/net/rxrpc/ar-ack.c
69959 --- linux-3.0.4/net/rxrpc/ar-ack.c 2011-07-21 22:17:23.000000000 -0400
69960 +++ linux-3.0.4/net/rxrpc/ar-ack.c 2011-08-23 21:48:14.000000000 -0400
69961 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
69962
69963 _enter("{%d,%d,%d,%d},",
69964 call->acks_hard, call->acks_unacked,
69965 - atomic_read(&call->sequence),
69966 + atomic_read_unchecked(&call->sequence),
69967 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
69968
69969 stop = 0;
69970 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
69971
69972 /* each Tx packet has a new serial number */
69973 sp->hdr.serial =
69974 - htonl(atomic_inc_return(&call->conn->serial));
69975 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
69976
69977 hdr = (struct rxrpc_header *) txb->head;
69978 hdr->serial = sp->hdr.serial;
69979 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struc
69980 */
69981 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
69982 {
69983 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
69984 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
69985 }
69986
69987 /*
69988 @@ -629,7 +629,7 @@ process_further:
69989
69990 latest = ntohl(sp->hdr.serial);
69991 hard = ntohl(ack.firstPacket);
69992 - tx = atomic_read(&call->sequence);
69993 + tx = atomic_read_unchecked(&call->sequence);
69994
69995 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69996 latest,
69997 @@ -842,6 +842,8 @@ void rxrpc_process_call(struct work_stru
69998 u32 abort_code = RX_PROTOCOL_ERROR;
69999 u8 *acks = NULL;
70000
70001 + pax_track_stack();
70002 +
70003 //printk("\n--------------------\n");
70004 _enter("{%d,%s,%lx} [%lu]",
70005 call->debug_id, rxrpc_call_states[call->state], call->events,
70006 @@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_stru
70007 goto maybe_reschedule;
70008
70009 send_ACK_with_skew:
70010 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
70011 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
70012 ntohl(ack.serial));
70013 send_ACK:
70014 mtu = call->conn->trans->peer->if_mtu;
70015 @@ -1173,7 +1175,7 @@ send_ACK:
70016 ackinfo.rxMTU = htonl(5692);
70017 ackinfo.jumbo_max = htonl(4);
70018
70019 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
70020 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
70021 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
70022 ntohl(hdr.serial),
70023 ntohs(ack.maxSkew),
70024 @@ -1191,7 +1193,7 @@ send_ACK:
70025 send_message:
70026 _debug("send message");
70027
70028 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
70029 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
70030 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
70031 send_message_2:
70032
70033 diff -urNp linux-3.0.4/net/rxrpc/ar-call.c linux-3.0.4/net/rxrpc/ar-call.c
70034 --- linux-3.0.4/net/rxrpc/ar-call.c 2011-07-21 22:17:23.000000000 -0400
70035 +++ linux-3.0.4/net/rxrpc/ar-call.c 2011-08-23 21:47:56.000000000 -0400
70036 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
70037 spin_lock_init(&call->lock);
70038 rwlock_init(&call->state_lock);
70039 atomic_set(&call->usage, 1);
70040 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
70041 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70042 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
70043
70044 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
70045 diff -urNp linux-3.0.4/net/rxrpc/ar-connection.c linux-3.0.4/net/rxrpc/ar-connection.c
70046 --- linux-3.0.4/net/rxrpc/ar-connection.c 2011-07-21 22:17:23.000000000 -0400
70047 +++ linux-3.0.4/net/rxrpc/ar-connection.c 2011-08-23 21:47:56.000000000 -0400
70048 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
70049 rwlock_init(&conn->lock);
70050 spin_lock_init(&conn->state_lock);
70051 atomic_set(&conn->usage, 1);
70052 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
70053 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70054 conn->avail_calls = RXRPC_MAXCALLS;
70055 conn->size_align = 4;
70056 conn->header_size = sizeof(struct rxrpc_header);
70057 diff -urNp linux-3.0.4/net/rxrpc/ar-connevent.c linux-3.0.4/net/rxrpc/ar-connevent.c
70058 --- linux-3.0.4/net/rxrpc/ar-connevent.c 2011-07-21 22:17:23.000000000 -0400
70059 +++ linux-3.0.4/net/rxrpc/ar-connevent.c 2011-08-23 21:47:56.000000000 -0400
70060 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
70061
70062 len = iov[0].iov_len + iov[1].iov_len;
70063
70064 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
70065 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
70066 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
70067
70068 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
70069 diff -urNp linux-3.0.4/net/rxrpc/ar-input.c linux-3.0.4/net/rxrpc/ar-input.c
70070 --- linux-3.0.4/net/rxrpc/ar-input.c 2011-07-21 22:17:23.000000000 -0400
70071 +++ linux-3.0.4/net/rxrpc/ar-input.c 2011-08-23 21:47:56.000000000 -0400
70072 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
70073 /* track the latest serial number on this connection for ACK packet
70074 * information */
70075 serial = ntohl(sp->hdr.serial);
70076 - hi_serial = atomic_read(&call->conn->hi_serial);
70077 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
70078 while (serial > hi_serial)
70079 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
70080 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
70081 serial);
70082
70083 /* request ACK generation for any ACK or DATA packet that requests
70084 diff -urNp linux-3.0.4/net/rxrpc/ar-internal.h linux-3.0.4/net/rxrpc/ar-internal.h
70085 --- linux-3.0.4/net/rxrpc/ar-internal.h 2011-07-21 22:17:23.000000000 -0400
70086 +++ linux-3.0.4/net/rxrpc/ar-internal.h 2011-08-23 21:47:56.000000000 -0400
70087 @@ -272,8 +272,8 @@ struct rxrpc_connection {
70088 int error; /* error code for local abort */
70089 int debug_id; /* debug ID for printks */
70090 unsigned call_counter; /* call ID counter */
70091 - atomic_t serial; /* packet serial number counter */
70092 - atomic_t hi_serial; /* highest serial number received */
70093 + atomic_unchecked_t serial; /* packet serial number counter */
70094 + atomic_unchecked_t hi_serial; /* highest serial number received */
70095 u8 avail_calls; /* number of calls available */
70096 u8 size_align; /* data size alignment (for security) */
70097 u8 header_size; /* rxrpc + security header size */
70098 @@ -346,7 +346,7 @@ struct rxrpc_call {
70099 spinlock_t lock;
70100 rwlock_t state_lock; /* lock for state transition */
70101 atomic_t usage;
70102 - atomic_t sequence; /* Tx data packet sequence counter */
70103 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
70104 u32 abort_code; /* local/remote abort code */
70105 enum { /* current state of call */
70106 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
70107 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
70108 */
70109 extern atomic_t rxrpc_n_skbs;
70110 extern __be32 rxrpc_epoch;
70111 -extern atomic_t rxrpc_debug_id;
70112 +extern atomic_unchecked_t rxrpc_debug_id;
70113 extern struct workqueue_struct *rxrpc_workqueue;
70114
70115 /*
70116 diff -urNp linux-3.0.4/net/rxrpc/ar-local.c linux-3.0.4/net/rxrpc/ar-local.c
70117 --- linux-3.0.4/net/rxrpc/ar-local.c 2011-07-21 22:17:23.000000000 -0400
70118 +++ linux-3.0.4/net/rxrpc/ar-local.c 2011-08-23 21:47:56.000000000 -0400
70119 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
70120 spin_lock_init(&local->lock);
70121 rwlock_init(&local->services_lock);
70122 atomic_set(&local->usage, 1);
70123 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
70124 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70125 memcpy(&local->srx, srx, sizeof(*srx));
70126 }
70127
70128 diff -urNp linux-3.0.4/net/rxrpc/ar-output.c linux-3.0.4/net/rxrpc/ar-output.c
70129 --- linux-3.0.4/net/rxrpc/ar-output.c 2011-07-21 22:17:23.000000000 -0400
70130 +++ linux-3.0.4/net/rxrpc/ar-output.c 2011-08-23 21:47:56.000000000 -0400
70131 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
70132 sp->hdr.cid = call->cid;
70133 sp->hdr.callNumber = call->call_id;
70134 sp->hdr.seq =
70135 - htonl(atomic_inc_return(&call->sequence));
70136 + htonl(atomic_inc_return_unchecked(&call->sequence));
70137 sp->hdr.serial =
70138 - htonl(atomic_inc_return(&conn->serial));
70139 + htonl(atomic_inc_return_unchecked(&conn->serial));
70140 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
70141 sp->hdr.userStatus = 0;
70142 sp->hdr.securityIndex = conn->security_ix;
70143 diff -urNp linux-3.0.4/net/rxrpc/ar-peer.c linux-3.0.4/net/rxrpc/ar-peer.c
70144 --- linux-3.0.4/net/rxrpc/ar-peer.c 2011-07-21 22:17:23.000000000 -0400
70145 +++ linux-3.0.4/net/rxrpc/ar-peer.c 2011-08-23 21:47:56.000000000 -0400
70146 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
70147 INIT_LIST_HEAD(&peer->error_targets);
70148 spin_lock_init(&peer->lock);
70149 atomic_set(&peer->usage, 1);
70150 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
70151 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70152 memcpy(&peer->srx, srx, sizeof(*srx));
70153
70154 rxrpc_assess_MTU_size(peer);
70155 diff -urNp linux-3.0.4/net/rxrpc/ar-proc.c linux-3.0.4/net/rxrpc/ar-proc.c
70156 --- linux-3.0.4/net/rxrpc/ar-proc.c 2011-07-21 22:17:23.000000000 -0400
70157 +++ linux-3.0.4/net/rxrpc/ar-proc.c 2011-08-23 21:47:56.000000000 -0400
70158 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
70159 atomic_read(&conn->usage),
70160 rxrpc_conn_states[conn->state],
70161 key_serial(conn->key),
70162 - atomic_read(&conn->serial),
70163 - atomic_read(&conn->hi_serial));
70164 + atomic_read_unchecked(&conn->serial),
70165 + atomic_read_unchecked(&conn->hi_serial));
70166
70167 return 0;
70168 }
70169 diff -urNp linux-3.0.4/net/rxrpc/ar-transport.c linux-3.0.4/net/rxrpc/ar-transport.c
70170 --- linux-3.0.4/net/rxrpc/ar-transport.c 2011-07-21 22:17:23.000000000 -0400
70171 +++ linux-3.0.4/net/rxrpc/ar-transport.c 2011-08-23 21:47:56.000000000 -0400
70172 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
70173 spin_lock_init(&trans->client_lock);
70174 rwlock_init(&trans->conn_lock);
70175 atomic_set(&trans->usage, 1);
70176 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
70177 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70178
70179 if (peer->srx.transport.family == AF_INET) {
70180 switch (peer->srx.transport_type) {
70181 diff -urNp linux-3.0.4/net/rxrpc/rxkad.c linux-3.0.4/net/rxrpc/rxkad.c
70182 --- linux-3.0.4/net/rxrpc/rxkad.c 2011-07-21 22:17:23.000000000 -0400
70183 +++ linux-3.0.4/net/rxrpc/rxkad.c 2011-08-23 21:48:14.000000000 -0400
70184 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
70185 u16 check;
70186 int nsg;
70187
70188 + pax_track_stack();
70189 +
70190 sp = rxrpc_skb(skb);
70191
70192 _enter("");
70193 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
70194 u16 check;
70195 int nsg;
70196
70197 + pax_track_stack();
70198 +
70199 _enter("");
70200
70201 sp = rxrpc_skb(skb);
70202 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
70203
70204 len = iov[0].iov_len + iov[1].iov_len;
70205
70206 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
70207 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
70208 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
70209
70210 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
70211 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
70212
70213 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
70214
70215 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
70216 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
70217 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
70218
70219 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
70220 diff -urNp linux-3.0.4/net/sctp/proc.c linux-3.0.4/net/sctp/proc.c
70221 --- linux-3.0.4/net/sctp/proc.c 2011-07-21 22:17:23.000000000 -0400
70222 +++ linux-3.0.4/net/sctp/proc.c 2011-08-23 21:48:14.000000000 -0400
70223 @@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct s
70224 seq_printf(seq,
70225 "%8pK %8pK %-3d %-3d %-2d %-4d "
70226 "%4d %8d %8d %7d %5lu %-5d %5d ",
70227 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
70228 + assoc, sk,
70229 + sctp_sk(sk)->type, sk->sk_state,
70230 assoc->state, hash,
70231 assoc->assoc_id,
70232 assoc->sndbuf_used,
70233 diff -urNp linux-3.0.4/net/sctp/socket.c linux-3.0.4/net/sctp/socket.c
70234 --- linux-3.0.4/net/sctp/socket.c 2011-07-21 22:17:23.000000000 -0400
70235 +++ linux-3.0.4/net/sctp/socket.c 2011-08-23 21:47:56.000000000 -0400
70236 @@ -4452,7 +4452,7 @@ static int sctp_getsockopt_peer_addrs(st
70237 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
70238 if (space_left < addrlen)
70239 return -ENOMEM;
70240 - if (copy_to_user(to, &temp, addrlen))
70241 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
70242 return -EFAULT;
70243 to += addrlen;
70244 cnt++;
70245 diff -urNp linux-3.0.4/net/socket.c linux-3.0.4/net/socket.c
70246 --- linux-3.0.4/net/socket.c 2011-08-23 21:44:40.000000000 -0400
70247 +++ linux-3.0.4/net/socket.c 2011-08-23 21:48:14.000000000 -0400
70248 @@ -88,6 +88,7 @@
70249 #include <linux/nsproxy.h>
70250 #include <linux/magic.h>
70251 #include <linux/slab.h>
70252 +#include <linux/in.h>
70253
70254 #include <asm/uaccess.h>
70255 #include <asm/unistd.h>
70256 @@ -105,6 +106,8 @@
70257 #include <linux/sockios.h>
70258 #include <linux/atalk.h>
70259
70260 +#include <linux/grsock.h>
70261 +
70262 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
70263 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
70264 unsigned long nr_segs, loff_t pos);
70265 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struc
70266 &sockfs_dentry_operations, SOCKFS_MAGIC);
70267 }
70268
70269 -static struct vfsmount *sock_mnt __read_mostly;
70270 +struct vfsmount *sock_mnt __read_mostly;
70271
70272 static struct file_system_type sock_fs_type = {
70273 .name = "sockfs",
70274 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int f
70275 return -EAFNOSUPPORT;
70276 if (type < 0 || type >= SOCK_MAX)
70277 return -EINVAL;
70278 + if (protocol < 0)
70279 + return -EINVAL;
70280
70281 /* Compatibility.
70282
70283 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int
70284 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
70285 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
70286
70287 + if(!gr_search_socket(family, type, protocol)) {
70288 + retval = -EACCES;
70289 + goto out;
70290 + }
70291 +
70292 + if (gr_handle_sock_all(family, type, protocol)) {
70293 + retval = -EACCES;
70294 + goto out;
70295 + }
70296 +
70297 retval = sock_create(family, type, protocol, &sock);
70298 if (retval < 0)
70299 goto out;
70300 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
70301 if (sock) {
70302 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
70303 if (err >= 0) {
70304 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
70305 + err = -EACCES;
70306 + goto error;
70307 + }
70308 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
70309 + if (err)
70310 + goto error;
70311 +
70312 err = security_socket_bind(sock,
70313 (struct sockaddr *)&address,
70314 addrlen);
70315 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
70316 (struct sockaddr *)
70317 &address, addrlen);
70318 }
70319 +error:
70320 fput_light(sock->file, fput_needed);
70321 }
70322 return err;
70323 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
70324 if ((unsigned)backlog > somaxconn)
70325 backlog = somaxconn;
70326
70327 + if (gr_handle_sock_server_other(sock->sk)) {
70328 + err = -EPERM;
70329 + goto error;
70330 + }
70331 +
70332 + err = gr_search_listen(sock);
70333 + if (err)
70334 + goto error;
70335 +
70336 err = security_socket_listen(sock, backlog);
70337 if (!err)
70338 err = sock->ops->listen(sock, backlog);
70339
70340 +error:
70341 fput_light(sock->file, fput_needed);
70342 }
70343 return err;
70344 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
70345 newsock->type = sock->type;
70346 newsock->ops = sock->ops;
70347
70348 + if (gr_handle_sock_server_other(sock->sk)) {
70349 + err = -EPERM;
70350 + sock_release(newsock);
70351 + goto out_put;
70352 + }
70353 +
70354 + err = gr_search_accept(sock);
70355 + if (err) {
70356 + sock_release(newsock);
70357 + goto out_put;
70358 + }
70359 +
70360 /*
70361 * We don't need try_module_get here, as the listening socket (sock)
70362 * has the protocol module (sock->ops->owner) held.
70363 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
70364 fd_install(newfd, newfile);
70365 err = newfd;
70366
70367 + gr_attach_curr_ip(newsock->sk);
70368 +
70369 out_put:
70370 fput_light(sock->file, fput_needed);
70371 out:
70372 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
70373 int, addrlen)
70374 {
70375 struct socket *sock;
70376 + struct sockaddr *sck;
70377 struct sockaddr_storage address;
70378 int err, fput_needed;
70379
70380 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
70381 if (err < 0)
70382 goto out_put;
70383
70384 + sck = (struct sockaddr *)&address;
70385 +
70386 + if (gr_handle_sock_client(sck)) {
70387 + err = -EACCES;
70388 + goto out_put;
70389 + }
70390 +
70391 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
70392 + if (err)
70393 + goto out_put;
70394 +
70395 err =
70396 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
70397 if (err)
70398 @@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *
70399 unsigned char *ctl_buf = ctl;
70400 int err, ctl_len, iov_size, total_len;
70401
70402 + pax_track_stack();
70403 +
70404 err = -EFAULT;
70405 if (MSG_CMSG_COMPAT & flags) {
70406 if (get_compat_msghdr(msg_sys, msg_compat))
70407 diff -urNp linux-3.0.4/net/sunrpc/sched.c linux-3.0.4/net/sunrpc/sched.c
70408 --- linux-3.0.4/net/sunrpc/sched.c 2011-07-21 22:17:23.000000000 -0400
70409 +++ linux-3.0.4/net/sunrpc/sched.c 2011-08-23 21:47:56.000000000 -0400
70410 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
70411 #ifdef RPC_DEBUG
70412 static void rpc_task_set_debuginfo(struct rpc_task *task)
70413 {
70414 - static atomic_t rpc_pid;
70415 + static atomic_unchecked_t rpc_pid;
70416
70417 - task->tk_pid = atomic_inc_return(&rpc_pid);
70418 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
70419 }
70420 #else
70421 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
70422 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c
70423 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-07-21 22:17:23.000000000 -0400
70424 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-08-23 21:47:56.000000000 -0400
70425 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
70426 static unsigned int min_max_inline = 4096;
70427 static unsigned int max_max_inline = 65536;
70428
70429 -atomic_t rdma_stat_recv;
70430 -atomic_t rdma_stat_read;
70431 -atomic_t rdma_stat_write;
70432 -atomic_t rdma_stat_sq_starve;
70433 -atomic_t rdma_stat_rq_starve;
70434 -atomic_t rdma_stat_rq_poll;
70435 -atomic_t rdma_stat_rq_prod;
70436 -atomic_t rdma_stat_sq_poll;
70437 -atomic_t rdma_stat_sq_prod;
70438 +atomic_unchecked_t rdma_stat_recv;
70439 +atomic_unchecked_t rdma_stat_read;
70440 +atomic_unchecked_t rdma_stat_write;
70441 +atomic_unchecked_t rdma_stat_sq_starve;
70442 +atomic_unchecked_t rdma_stat_rq_starve;
70443 +atomic_unchecked_t rdma_stat_rq_poll;
70444 +atomic_unchecked_t rdma_stat_rq_prod;
70445 +atomic_unchecked_t rdma_stat_sq_poll;
70446 +atomic_unchecked_t rdma_stat_sq_prod;
70447
70448 /* Temporary NFS request map and context caches */
70449 struct kmem_cache *svc_rdma_map_cachep;
70450 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
70451 len -= *ppos;
70452 if (len > *lenp)
70453 len = *lenp;
70454 - if (len && copy_to_user(buffer, str_buf, len))
70455 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
70456 return -EFAULT;
70457 *lenp = len;
70458 *ppos += len;
70459 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
70460 {
70461 .procname = "rdma_stat_read",
70462 .data = &rdma_stat_read,
70463 - .maxlen = sizeof(atomic_t),
70464 + .maxlen = sizeof(atomic_unchecked_t),
70465 .mode = 0644,
70466 .proc_handler = read_reset_stat,
70467 },
70468 {
70469 .procname = "rdma_stat_recv",
70470 .data = &rdma_stat_recv,
70471 - .maxlen = sizeof(atomic_t),
70472 + .maxlen = sizeof(atomic_unchecked_t),
70473 .mode = 0644,
70474 .proc_handler = read_reset_stat,
70475 },
70476 {
70477 .procname = "rdma_stat_write",
70478 .data = &rdma_stat_write,
70479 - .maxlen = sizeof(atomic_t),
70480 + .maxlen = sizeof(atomic_unchecked_t),
70481 .mode = 0644,
70482 .proc_handler = read_reset_stat,
70483 },
70484 {
70485 .procname = "rdma_stat_sq_starve",
70486 .data = &rdma_stat_sq_starve,
70487 - .maxlen = sizeof(atomic_t),
70488 + .maxlen = sizeof(atomic_unchecked_t),
70489 .mode = 0644,
70490 .proc_handler = read_reset_stat,
70491 },
70492 {
70493 .procname = "rdma_stat_rq_starve",
70494 .data = &rdma_stat_rq_starve,
70495 - .maxlen = sizeof(atomic_t),
70496 + .maxlen = sizeof(atomic_unchecked_t),
70497 .mode = 0644,
70498 .proc_handler = read_reset_stat,
70499 },
70500 {
70501 .procname = "rdma_stat_rq_poll",
70502 .data = &rdma_stat_rq_poll,
70503 - .maxlen = sizeof(atomic_t),
70504 + .maxlen = sizeof(atomic_unchecked_t),
70505 .mode = 0644,
70506 .proc_handler = read_reset_stat,
70507 },
70508 {
70509 .procname = "rdma_stat_rq_prod",
70510 .data = &rdma_stat_rq_prod,
70511 - .maxlen = sizeof(atomic_t),
70512 + .maxlen = sizeof(atomic_unchecked_t),
70513 .mode = 0644,
70514 .proc_handler = read_reset_stat,
70515 },
70516 {
70517 .procname = "rdma_stat_sq_poll",
70518 .data = &rdma_stat_sq_poll,
70519 - .maxlen = sizeof(atomic_t),
70520 + .maxlen = sizeof(atomic_unchecked_t),
70521 .mode = 0644,
70522 .proc_handler = read_reset_stat,
70523 },
70524 {
70525 .procname = "rdma_stat_sq_prod",
70526 .data = &rdma_stat_sq_prod,
70527 - .maxlen = sizeof(atomic_t),
70528 + .maxlen = sizeof(atomic_unchecked_t),
70529 .mode = 0644,
70530 .proc_handler = read_reset_stat,
70531 },
70532 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
70533 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-07-21 22:17:23.000000000 -0400
70534 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-23 21:47:56.000000000 -0400
70535 @@ -499,7 +499,7 @@ next_sge:
70536 svc_rdma_put_context(ctxt, 0);
70537 goto out;
70538 }
70539 - atomic_inc(&rdma_stat_read);
70540 + atomic_inc_unchecked(&rdma_stat_read);
70541
70542 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
70543 chl_map->ch[ch_no].count -= read_wr.num_sge;
70544 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
70545 dto_q);
70546 list_del_init(&ctxt->dto_q);
70547 } else {
70548 - atomic_inc(&rdma_stat_rq_starve);
70549 + atomic_inc_unchecked(&rdma_stat_rq_starve);
70550 clear_bit(XPT_DATA, &xprt->xpt_flags);
70551 ctxt = NULL;
70552 }
70553 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
70554 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
70555 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
70556 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
70557 - atomic_inc(&rdma_stat_recv);
70558 + atomic_inc_unchecked(&rdma_stat_recv);
70559
70560 /* Build up the XDR from the receive buffers. */
70561 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
70562 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c
70563 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-07-21 22:17:23.000000000 -0400
70564 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-08-23 21:47:56.000000000 -0400
70565 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
70566 write_wr.wr.rdma.remote_addr = to;
70567
70568 /* Post It */
70569 - atomic_inc(&rdma_stat_write);
70570 + atomic_inc_unchecked(&rdma_stat_write);
70571 if (svc_rdma_send(xprt, &write_wr))
70572 goto err;
70573 return 0;
70574 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c
70575 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-07-21 22:17:23.000000000 -0400
70576 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-08-23 21:47:56.000000000 -0400
70577 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
70578 return;
70579
70580 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
70581 - atomic_inc(&rdma_stat_rq_poll);
70582 + atomic_inc_unchecked(&rdma_stat_rq_poll);
70583
70584 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
70585 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
70586 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
70587 }
70588
70589 if (ctxt)
70590 - atomic_inc(&rdma_stat_rq_prod);
70591 + atomic_inc_unchecked(&rdma_stat_rq_prod);
70592
70593 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
70594 /*
70595 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
70596 return;
70597
70598 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
70599 - atomic_inc(&rdma_stat_sq_poll);
70600 + atomic_inc_unchecked(&rdma_stat_sq_poll);
70601 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
70602 if (wc.status != IB_WC_SUCCESS)
70603 /* Close the transport */
70604 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
70605 }
70606
70607 if (ctxt)
70608 - atomic_inc(&rdma_stat_sq_prod);
70609 + atomic_inc_unchecked(&rdma_stat_sq_prod);
70610 }
70611
70612 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
70613 @@ -1272,7 +1272,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
70614 spin_lock_bh(&xprt->sc_lock);
70615 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
70616 spin_unlock_bh(&xprt->sc_lock);
70617 - atomic_inc(&rdma_stat_sq_starve);
70618 + atomic_inc_unchecked(&rdma_stat_sq_starve);
70619
70620 /* See if we can opportunistically reap SQ WR to make room */
70621 sq_cq_reap(xprt);
70622 diff -urNp linux-3.0.4/net/sysctl_net.c linux-3.0.4/net/sysctl_net.c
70623 --- linux-3.0.4/net/sysctl_net.c 2011-07-21 22:17:23.000000000 -0400
70624 +++ linux-3.0.4/net/sysctl_net.c 2011-08-23 21:48:14.000000000 -0400
70625 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
70626 struct ctl_table *table)
70627 {
70628 /* Allow network administrator to have same access as root. */
70629 - if (capable(CAP_NET_ADMIN)) {
70630 + if (capable_nolog(CAP_NET_ADMIN)) {
70631 int mode = (table->mode >> 6) & 7;
70632 return (mode << 6) | (mode << 3) | mode;
70633 }
70634 diff -urNp linux-3.0.4/net/unix/af_unix.c linux-3.0.4/net/unix/af_unix.c
70635 --- linux-3.0.4/net/unix/af_unix.c 2011-07-21 22:17:23.000000000 -0400
70636 +++ linux-3.0.4/net/unix/af_unix.c 2011-08-23 21:48:14.000000000 -0400
70637 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
70638 err = -ECONNREFUSED;
70639 if (!S_ISSOCK(inode->i_mode))
70640 goto put_fail;
70641 +
70642 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
70643 + err = -EACCES;
70644 + goto put_fail;
70645 + }
70646 +
70647 u = unix_find_socket_byinode(inode);
70648 if (!u)
70649 goto put_fail;
70650 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
70651 if (u) {
70652 struct dentry *dentry;
70653 dentry = unix_sk(u)->dentry;
70654 +
70655 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
70656 + err = -EPERM;
70657 + sock_put(u);
70658 + goto fail;
70659 + }
70660 +
70661 if (dentry)
70662 touch_atime(unix_sk(u)->mnt, dentry);
70663 } else
70664 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
70665 err = security_path_mknod(&nd.path, dentry, mode, 0);
70666 if (err)
70667 goto out_mknod_drop_write;
70668 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
70669 + err = -EACCES;
70670 + goto out_mknod_drop_write;
70671 + }
70672 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
70673 out_mknod_drop_write:
70674 mnt_drop_write(nd.path.mnt);
70675 if (err)
70676 goto out_mknod_dput;
70677 +
70678 + gr_handle_create(dentry, nd.path.mnt);
70679 +
70680 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
70681 dput(nd.path.dentry);
70682 nd.path.dentry = dentry;
70683 diff -urNp linux-3.0.4/net/wireless/core.h linux-3.0.4/net/wireless/core.h
70684 --- linux-3.0.4/net/wireless/core.h 2011-07-21 22:17:23.000000000 -0400
70685 +++ linux-3.0.4/net/wireless/core.h 2011-08-23 21:47:56.000000000 -0400
70686 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
70687 struct mutex mtx;
70688
70689 /* rfkill support */
70690 - struct rfkill_ops rfkill_ops;
70691 + rfkill_ops_no_const rfkill_ops;
70692 struct rfkill *rfkill;
70693 struct work_struct rfkill_sync;
70694
70695 diff -urNp linux-3.0.4/net/wireless/wext-core.c linux-3.0.4/net/wireless/wext-core.c
70696 --- linux-3.0.4/net/wireless/wext-core.c 2011-07-21 22:17:23.000000000 -0400
70697 +++ linux-3.0.4/net/wireless/wext-core.c 2011-08-23 21:47:56.000000000 -0400
70698 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
70699 */
70700
70701 /* Support for very large requests */
70702 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
70703 - (user_length > descr->max_tokens)) {
70704 + if (user_length > descr->max_tokens) {
70705 /* Allow userspace to GET more than max so
70706 * we can support any size GET requests.
70707 * There is still a limit : -ENOMEM.
70708 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
70709 }
70710 }
70711
70712 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
70713 - /*
70714 - * If this is a GET, but not NOMAX, it means that the extra
70715 - * data is not bounded by userspace, but by max_tokens. Thus
70716 - * set the length to max_tokens. This matches the extra data
70717 - * allocation.
70718 - * The driver should fill it with the number of tokens it
70719 - * provided, and it may check iwp->length rather than having
70720 - * knowledge of max_tokens. If the driver doesn't change the
70721 - * iwp->length, this ioctl just copies back max_token tokens
70722 - * filled with zeroes. Hopefully the driver isn't claiming
70723 - * them to be valid data.
70724 - */
70725 - iwp->length = descr->max_tokens;
70726 - }
70727 -
70728 err = handler(dev, info, (union iwreq_data *) iwp, extra);
70729
70730 iwp->length += essid_compat;
70731 diff -urNp linux-3.0.4/net/xfrm/xfrm_policy.c linux-3.0.4/net/xfrm/xfrm_policy.c
70732 --- linux-3.0.4/net/xfrm/xfrm_policy.c 2011-07-21 22:17:23.000000000 -0400
70733 +++ linux-3.0.4/net/xfrm/xfrm_policy.c 2011-08-23 21:47:56.000000000 -0400
70734 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
70735 {
70736 policy->walk.dead = 1;
70737
70738 - atomic_inc(&policy->genid);
70739 + atomic_inc_unchecked(&policy->genid);
70740
70741 if (del_timer(&policy->timer))
70742 xfrm_pol_put(policy);
70743 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
70744 hlist_add_head(&policy->bydst, chain);
70745 xfrm_pol_hold(policy);
70746 net->xfrm.policy_count[dir]++;
70747 - atomic_inc(&flow_cache_genid);
70748 + atomic_inc_unchecked(&flow_cache_genid);
70749 if (delpol)
70750 __xfrm_policy_unlink(delpol, dir);
70751 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
70752 @@ -1528,7 +1528,7 @@ free_dst:
70753 goto out;
70754 }
70755
70756 -static int inline
70757 +static inline int
70758 xfrm_dst_alloc_copy(void **target, const void *src, int size)
70759 {
70760 if (!*target) {
70761 @@ -1540,7 +1540,7 @@ xfrm_dst_alloc_copy(void **target, const
70762 return 0;
70763 }
70764
70765 -static int inline
70766 +static inline int
70767 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
70768 {
70769 #ifdef CONFIG_XFRM_SUB_POLICY
70770 @@ -1552,7 +1552,7 @@ xfrm_dst_update_parent(struct dst_entry
70771 #endif
70772 }
70773
70774 -static int inline
70775 +static inline int
70776 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
70777 {
70778 #ifdef CONFIG_XFRM_SUB_POLICY
70779 @@ -1646,7 +1646,7 @@ xfrm_resolve_and_create_bundle(struct xf
70780
70781 xdst->num_pols = num_pols;
70782 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
70783 - xdst->policy_genid = atomic_read(&pols[0]->genid);
70784 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
70785
70786 return xdst;
70787 }
70788 @@ -2333,7 +2333,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
70789 if (xdst->xfrm_genid != dst->xfrm->genid)
70790 return 0;
70791 if (xdst->num_pols > 0 &&
70792 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
70793 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
70794 return 0;
70795
70796 mtu = dst_mtu(dst->child);
70797 @@ -2861,7 +2861,7 @@ static int xfrm_policy_migrate(struct xf
70798 sizeof(pol->xfrm_vec[i].saddr));
70799 pol->xfrm_vec[i].encap_family = mp->new_family;
70800 /* flush bundles */
70801 - atomic_inc(&pol->genid);
70802 + atomic_inc_unchecked(&pol->genid);
70803 }
70804 }
70805
70806 diff -urNp linux-3.0.4/net/xfrm/xfrm_user.c linux-3.0.4/net/xfrm/xfrm_user.c
70807 --- linux-3.0.4/net/xfrm/xfrm_user.c 2011-07-21 22:17:23.000000000 -0400
70808 +++ linux-3.0.4/net/xfrm/xfrm_user.c 2011-08-23 21:48:14.000000000 -0400
70809 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
70810 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
70811 int i;
70812
70813 + pax_track_stack();
70814 +
70815 if (xp->xfrm_nr == 0)
70816 return 0;
70817
70818 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
70819 int err;
70820 int n = 0;
70821
70822 + pax_track_stack();
70823 +
70824 if (attrs[XFRMA_MIGRATE] == NULL)
70825 return -EINVAL;
70826
70827 diff -urNp linux-3.0.4/scripts/basic/fixdep.c linux-3.0.4/scripts/basic/fixdep.c
70828 --- linux-3.0.4/scripts/basic/fixdep.c 2011-07-21 22:17:23.000000000 -0400
70829 +++ linux-3.0.4/scripts/basic/fixdep.c 2011-08-23 21:47:56.000000000 -0400
70830 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
70831
70832 static void parse_config_file(const char *map, size_t len)
70833 {
70834 - const int *end = (const int *) (map + len);
70835 + const unsigned int *end = (const unsigned int *) (map + len);
70836 /* start at +1, so that p can never be < map */
70837 - const int *m = (const int *) map + 1;
70838 + const unsigned int *m = (const unsigned int *) map + 1;
70839 const char *p, *q;
70840
70841 for (; m < end; m++) {
70842 @@ -405,7 +405,7 @@ static void print_deps(void)
70843 static void traps(void)
70844 {
70845 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
70846 - int *p = (int *)test;
70847 + unsigned int *p = (unsigned int *)test;
70848
70849 if (*p != INT_CONF) {
70850 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
70851 diff -urNp linux-3.0.4/scripts/gcc-plugin.sh linux-3.0.4/scripts/gcc-plugin.sh
70852 --- linux-3.0.4/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
70853 +++ linux-3.0.4/scripts/gcc-plugin.sh 2011-08-23 21:47:56.000000000 -0400
70854 @@ -0,0 +1,2 @@
70855 +#!/bin/sh
70856 +echo "#include \"gcc-plugin.h\"" | $* -x c -shared - -o /dev/null -I`$* -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
70857 diff -urNp linux-3.0.4/scripts/Makefile.build linux-3.0.4/scripts/Makefile.build
70858 --- linux-3.0.4/scripts/Makefile.build 2011-07-21 22:17:23.000000000 -0400
70859 +++ linux-3.0.4/scripts/Makefile.build 2011-08-23 21:47:56.000000000 -0400
70860 @@ -109,7 +109,7 @@ endif
70861 endif
70862
70863 # Do not include host rules unless needed
70864 -ifneq ($(hostprogs-y)$(hostprogs-m),)
70865 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
70866 include scripts/Makefile.host
70867 endif
70868
70869 diff -urNp linux-3.0.4/scripts/Makefile.clean linux-3.0.4/scripts/Makefile.clean
70870 --- linux-3.0.4/scripts/Makefile.clean 2011-07-21 22:17:23.000000000 -0400
70871 +++ linux-3.0.4/scripts/Makefile.clean 2011-08-23 21:47:56.000000000 -0400
70872 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
70873 __clean-files := $(extra-y) $(always) \
70874 $(targets) $(clean-files) \
70875 $(host-progs) \
70876 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
70877 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
70878 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
70879
70880 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
70881
70882 diff -urNp linux-3.0.4/scripts/Makefile.host linux-3.0.4/scripts/Makefile.host
70883 --- linux-3.0.4/scripts/Makefile.host 2011-07-21 22:17:23.000000000 -0400
70884 +++ linux-3.0.4/scripts/Makefile.host 2011-08-23 21:47:56.000000000 -0400
70885 @@ -31,6 +31,7 @@
70886 # Note: Shared libraries consisting of C++ files are not supported
70887
70888 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
70889 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
70890
70891 # C code
70892 # Executables compiled from a single .c file
70893 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
70894 # Shared libaries (only .c supported)
70895 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
70896 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
70897 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
70898 # Remove .so files from "xxx-objs"
70899 host-cobjs := $(filter-out %.so,$(host-cobjs))
70900
70901 diff -urNp linux-3.0.4/scripts/mod/file2alias.c linux-3.0.4/scripts/mod/file2alias.c
70902 --- linux-3.0.4/scripts/mod/file2alias.c 2011-07-21 22:17:23.000000000 -0400
70903 +++ linux-3.0.4/scripts/mod/file2alias.c 2011-08-23 21:47:56.000000000 -0400
70904 @@ -72,7 +72,7 @@ static void device_id_check(const char *
70905 unsigned long size, unsigned long id_size,
70906 void *symval)
70907 {
70908 - int i;
70909 + unsigned int i;
70910
70911 if (size % id_size || size < id_size) {
70912 if (cross_build != 0)
70913 @@ -102,7 +102,7 @@ static void device_id_check(const char *
70914 /* USB is special because the bcdDevice can be matched against a numeric range */
70915 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
70916 static void do_usb_entry(struct usb_device_id *id,
70917 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
70918 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
70919 unsigned char range_lo, unsigned char range_hi,
70920 unsigned char max, struct module *mod)
70921 {
70922 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
70923 for (i = 0; i < count; i++) {
70924 const char *id = (char *)devs[i].id;
70925 char acpi_id[sizeof(devs[0].id)];
70926 - int j;
70927 + unsigned int j;
70928
70929 buf_printf(&mod->dev_table_buf,
70930 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70931 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
70932
70933 for (j = 0; j < PNP_MAX_DEVICES; j++) {
70934 const char *id = (char *)card->devs[j].id;
70935 - int i2, j2;
70936 + unsigned int i2, j2;
70937 int dup = 0;
70938
70939 if (!id[0])
70940 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
70941 /* add an individual alias for every device entry */
70942 if (!dup) {
70943 char acpi_id[sizeof(card->devs[0].id)];
70944 - int k;
70945 + unsigned int k;
70946
70947 buf_printf(&mod->dev_table_buf,
70948 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70949 @@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, co
70950 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
70951 char *alias)
70952 {
70953 - int i, j;
70954 + unsigned int i, j;
70955
70956 sprintf(alias, "dmi*");
70957
70958 diff -urNp linux-3.0.4/scripts/mod/modpost.c linux-3.0.4/scripts/mod/modpost.c
70959 --- linux-3.0.4/scripts/mod/modpost.c 2011-07-21 22:17:23.000000000 -0400
70960 +++ linux-3.0.4/scripts/mod/modpost.c 2011-08-23 21:47:56.000000000 -0400
70961 @@ -892,6 +892,7 @@ enum mismatch {
70962 ANY_INIT_TO_ANY_EXIT,
70963 ANY_EXIT_TO_ANY_INIT,
70964 EXPORT_TO_INIT_EXIT,
70965 + DATA_TO_TEXT
70966 };
70967
70968 struct sectioncheck {
70969 @@ -1000,6 +1001,12 @@ const struct sectioncheck sectioncheck[]
70970 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
70971 .mismatch = EXPORT_TO_INIT_EXIT,
70972 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
70973 +},
70974 +/* Do not reference code from writable data */
70975 +{
70976 + .fromsec = { DATA_SECTIONS, NULL },
70977 + .tosec = { TEXT_SECTIONS, NULL },
70978 + .mismatch = DATA_TO_TEXT
70979 }
70980 };
70981
70982 @@ -1122,10 +1129,10 @@ static Elf_Sym *find_elf_symbol(struct e
70983 continue;
70984 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
70985 continue;
70986 - if (sym->st_value == addr)
70987 - return sym;
70988 /* Find a symbol nearby - addr are maybe negative */
70989 d = sym->st_value - addr;
70990 + if (d == 0)
70991 + return sym;
70992 if (d < 0)
70993 d = addr - sym->st_value;
70994 if (d < distance) {
70995 @@ -1404,6 +1411,14 @@ static void report_sec_mismatch(const ch
70996 tosym, prl_to, prl_to, tosym);
70997 free(prl_to);
70998 break;
70999 + case DATA_TO_TEXT:
71000 +/*
71001 + fprintf(stderr,
71002 + "The variable %s references\n"
71003 + "the %s %s%s%s\n",
71004 + fromsym, to, sec2annotation(tosec), tosym, to_p);
71005 +*/
71006 + break;
71007 }
71008 fprintf(stderr, "\n");
71009 }
71010 @@ -1629,7 +1644,7 @@ static void section_rel(const char *modn
71011 static void check_sec_ref(struct module *mod, const char *modname,
71012 struct elf_info *elf)
71013 {
71014 - int i;
71015 + unsigned int i;
71016 Elf_Shdr *sechdrs = elf->sechdrs;
71017
71018 /* Walk through all sections */
71019 @@ -1727,7 +1742,7 @@ void __attribute__((format(printf, 2, 3)
71020 va_end(ap);
71021 }
71022
71023 -void buf_write(struct buffer *buf, const char *s, int len)
71024 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
71025 {
71026 if (buf->size - buf->pos < len) {
71027 buf->size += len + SZ;
71028 @@ -1939,7 +1954,7 @@ static void write_if_changed(struct buff
71029 if (fstat(fileno(file), &st) < 0)
71030 goto close_write;
71031
71032 - if (st.st_size != b->pos)
71033 + if (st.st_size != (off_t)b->pos)
71034 goto close_write;
71035
71036 tmp = NOFAIL(malloc(b->pos));
71037 diff -urNp linux-3.0.4/scripts/mod/modpost.h linux-3.0.4/scripts/mod/modpost.h
71038 --- linux-3.0.4/scripts/mod/modpost.h 2011-07-21 22:17:23.000000000 -0400
71039 +++ linux-3.0.4/scripts/mod/modpost.h 2011-08-23 21:47:56.000000000 -0400
71040 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
71041
71042 struct buffer {
71043 char *p;
71044 - int pos;
71045 - int size;
71046 + unsigned int pos;
71047 + unsigned int size;
71048 };
71049
71050 void __attribute__((format(printf, 2, 3)))
71051 buf_printf(struct buffer *buf, const char *fmt, ...);
71052
71053 void
71054 -buf_write(struct buffer *buf, const char *s, int len);
71055 +buf_write(struct buffer *buf, const char *s, unsigned int len);
71056
71057 struct module {
71058 struct module *next;
71059 diff -urNp linux-3.0.4/scripts/mod/sumversion.c linux-3.0.4/scripts/mod/sumversion.c
71060 --- linux-3.0.4/scripts/mod/sumversion.c 2011-07-21 22:17:23.000000000 -0400
71061 +++ linux-3.0.4/scripts/mod/sumversion.c 2011-08-23 21:47:56.000000000 -0400
71062 @@ -470,7 +470,7 @@ static void write_version(const char *fi
71063 goto out;
71064 }
71065
71066 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
71067 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
71068 warn("writing sum in %s failed: %s\n",
71069 filename, strerror(errno));
71070 goto out;
71071 diff -urNp linux-3.0.4/scripts/pnmtologo.c linux-3.0.4/scripts/pnmtologo.c
71072 --- linux-3.0.4/scripts/pnmtologo.c 2011-07-21 22:17:23.000000000 -0400
71073 +++ linux-3.0.4/scripts/pnmtologo.c 2011-08-23 21:47:56.000000000 -0400
71074 @@ -237,14 +237,14 @@ static void write_header(void)
71075 fprintf(out, " * Linux logo %s\n", logoname);
71076 fputs(" */\n\n", out);
71077 fputs("#include <linux/linux_logo.h>\n\n", out);
71078 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
71079 + fprintf(out, "static unsigned char %s_data[] = {\n",
71080 logoname);
71081 }
71082
71083 static void write_footer(void)
71084 {
71085 fputs("\n};\n\n", out);
71086 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
71087 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
71088 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
71089 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
71090 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
71091 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
71092 fputs("\n};\n\n", out);
71093
71094 /* write logo clut */
71095 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
71096 + fprintf(out, "static unsigned char %s_clut[] = {\n",
71097 logoname);
71098 write_hex_cnt = 0;
71099 for (i = 0; i < logo_clutsize; i++) {
71100 diff -urNp linux-3.0.4/security/apparmor/lsm.c linux-3.0.4/security/apparmor/lsm.c
71101 --- linux-3.0.4/security/apparmor/lsm.c 2011-08-23 21:44:40.000000000 -0400
71102 +++ linux-3.0.4/security/apparmor/lsm.c 2011-08-23 21:48:14.000000000 -0400
71103 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
71104 return error;
71105 }
71106
71107 -static struct security_operations apparmor_ops = {
71108 +static struct security_operations apparmor_ops __read_only = {
71109 .name = "apparmor",
71110
71111 .ptrace_access_check = apparmor_ptrace_access_check,
71112 diff -urNp linux-3.0.4/security/commoncap.c linux-3.0.4/security/commoncap.c
71113 --- linux-3.0.4/security/commoncap.c 2011-07-21 22:17:23.000000000 -0400
71114 +++ linux-3.0.4/security/commoncap.c 2011-08-23 21:48:14.000000000 -0400
71115 @@ -28,6 +28,7 @@
71116 #include <linux/prctl.h>
71117 #include <linux/securebits.h>
71118 #include <linux/user_namespace.h>
71119 +#include <net/sock.h>
71120
71121 /*
71122 * If a non-root user executes a setuid-root binary in
71123 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
71124
71125 int cap_netlink_recv(struct sk_buff *skb, int cap)
71126 {
71127 - if (!cap_raised(current_cap(), cap))
71128 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
71129 return -EPERM;
71130 return 0;
71131 }
71132 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_bin
71133 {
71134 const struct cred *cred = current_cred();
71135
71136 + if (gr_acl_enable_at_secure())
71137 + return 1;
71138 +
71139 if (cred->uid != 0) {
71140 if (bprm->cap_effective)
71141 return 1;
71142 diff -urNp linux-3.0.4/security/integrity/ima/ima_api.c linux-3.0.4/security/integrity/ima/ima_api.c
71143 --- linux-3.0.4/security/integrity/ima/ima_api.c 2011-07-21 22:17:23.000000000 -0400
71144 +++ linux-3.0.4/security/integrity/ima/ima_api.c 2011-08-23 21:47:56.000000000 -0400
71145 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
71146 int result;
71147
71148 /* can overflow, only indicator */
71149 - atomic_long_inc(&ima_htable.violations);
71150 + atomic_long_inc_unchecked(&ima_htable.violations);
71151
71152 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
71153 if (!entry) {
71154 diff -urNp linux-3.0.4/security/integrity/ima/ima_fs.c linux-3.0.4/security/integrity/ima/ima_fs.c
71155 --- linux-3.0.4/security/integrity/ima/ima_fs.c 2011-07-21 22:17:23.000000000 -0400
71156 +++ linux-3.0.4/security/integrity/ima/ima_fs.c 2011-08-23 21:47:56.000000000 -0400
71157 @@ -28,12 +28,12 @@
71158 static int valid_policy = 1;
71159 #define TMPBUFLEN 12
71160 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
71161 - loff_t *ppos, atomic_long_t *val)
71162 + loff_t *ppos, atomic_long_unchecked_t *val)
71163 {
71164 char tmpbuf[TMPBUFLEN];
71165 ssize_t len;
71166
71167 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
71168 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
71169 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
71170 }
71171
71172 diff -urNp linux-3.0.4/security/integrity/ima/ima.h linux-3.0.4/security/integrity/ima/ima.h
71173 --- linux-3.0.4/security/integrity/ima/ima.h 2011-07-21 22:17:23.000000000 -0400
71174 +++ linux-3.0.4/security/integrity/ima/ima.h 2011-08-23 21:47:56.000000000 -0400
71175 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
71176 extern spinlock_t ima_queue_lock;
71177
71178 struct ima_h_table {
71179 - atomic_long_t len; /* number of stored measurements in the list */
71180 - atomic_long_t violations;
71181 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
71182 + atomic_long_unchecked_t violations;
71183 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
71184 };
71185 extern struct ima_h_table ima_htable;
71186 diff -urNp linux-3.0.4/security/integrity/ima/ima_queue.c linux-3.0.4/security/integrity/ima/ima_queue.c
71187 --- linux-3.0.4/security/integrity/ima/ima_queue.c 2011-07-21 22:17:23.000000000 -0400
71188 +++ linux-3.0.4/security/integrity/ima/ima_queue.c 2011-08-23 21:47:56.000000000 -0400
71189 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
71190 INIT_LIST_HEAD(&qe->later);
71191 list_add_tail_rcu(&qe->later, &ima_measurements);
71192
71193 - atomic_long_inc(&ima_htable.len);
71194 + atomic_long_inc_unchecked(&ima_htable.len);
71195 key = ima_hash_key(entry->digest);
71196 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
71197 return 0;
71198 diff -urNp linux-3.0.4/security/Kconfig linux-3.0.4/security/Kconfig
71199 --- linux-3.0.4/security/Kconfig 2011-07-21 22:17:23.000000000 -0400
71200 +++ linux-3.0.4/security/Kconfig 2011-08-23 21:48:14.000000000 -0400
71201 @@ -4,6 +4,554 @@
71202
71203 menu "Security options"
71204
71205 +source grsecurity/Kconfig
71206 +
71207 +menu "PaX"
71208 +
71209 + config ARCH_TRACK_EXEC_LIMIT
71210 + bool
71211 +
71212 + config PAX_PER_CPU_PGD
71213 + bool
71214 +
71215 + config TASK_SIZE_MAX_SHIFT
71216 + int
71217 + depends on X86_64
71218 + default 47 if !PAX_PER_CPU_PGD
71219 + default 42 if PAX_PER_CPU_PGD
71220 +
71221 + config PAX_ENABLE_PAE
71222 + bool
71223 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
71224 +
71225 +config PAX
71226 + bool "Enable various PaX features"
71227 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
71228 + help
71229 + This allows you to enable various PaX features. PaX adds
71230 + intrusion prevention mechanisms to the kernel that reduce
71231 + the risks posed by exploitable memory corruption bugs.
71232 +
71233 +menu "PaX Control"
71234 + depends on PAX
71235 +
71236 +config PAX_SOFTMODE
71237 + bool 'Support soft mode'
71238 + select PAX_PT_PAX_FLAGS
71239 + help
71240 + Enabling this option will allow you to run PaX in soft mode, that
71241 + is, PaX features will not be enforced by default, only on executables
71242 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
71243 + is the only way to mark executables for soft mode use.
71244 +
71245 + Soft mode can be activated by using the "pax_softmode=1" kernel command
71246 + line option on boot. Furthermore you can control various PaX features
71247 + at runtime via the entries in /proc/sys/kernel/pax.
71248 +
71249 +config PAX_EI_PAX
71250 + bool 'Use legacy ELF header marking'
71251 + help
71252 + Enabling this option will allow you to control PaX features on
71253 + a per executable basis via the 'chpax' utility available at
71254 + http://pax.grsecurity.net/. The control flags will be read from
71255 + an otherwise reserved part of the ELF header. This marking has
71256 + numerous drawbacks (no support for soft-mode, toolchain does not
71257 + know about the non-standard use of the ELF header) therefore it
71258 + has been deprecated in favour of PT_PAX_FLAGS support.
71259 +
71260 + Note that if you enable PT_PAX_FLAGS marking support as well,
71261 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
71262 +
71263 +config PAX_PT_PAX_FLAGS
71264 + bool 'Use ELF program header marking'
71265 + help
71266 + Enabling this option will allow you to control PaX features on
71267 + a per executable basis via the 'paxctl' utility available at
71268 + http://pax.grsecurity.net/. The control flags will be read from
71269 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
71270 + has the benefits of supporting both soft mode and being fully
71271 + integrated into the toolchain (the binutils patch is available
71272 + from http://pax.grsecurity.net).
71273 +
71274 + If your toolchain does not support PT_PAX_FLAGS markings,
71275 + you can create one in most cases with 'paxctl -C'.
71276 +
71277 + Note that if you enable the legacy EI_PAX marking support as well,
71278 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
71279 +
71280 +choice
71281 + prompt 'MAC system integration'
71282 + default PAX_HAVE_ACL_FLAGS
71283 + help
71284 + Mandatory Access Control systems have the option of controlling
71285 + PaX flags on a per executable basis, choose the method supported
71286 + by your particular system.
71287 +
71288 + - "none": if your MAC system does not interact with PaX,
71289 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
71290 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
71291 +
71292 + NOTE: this option is for developers/integrators only.
71293 +
71294 + config PAX_NO_ACL_FLAGS
71295 + bool 'none'
71296 +
71297 + config PAX_HAVE_ACL_FLAGS
71298 + bool 'direct'
71299 +
71300 + config PAX_HOOK_ACL_FLAGS
71301 + bool 'hook'
71302 +endchoice
71303 +
71304 +endmenu
71305 +
71306 +menu "Non-executable pages"
71307 + depends on PAX
71308 +
71309 +config PAX_NOEXEC
71310 + bool "Enforce non-executable pages"
71311 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
71312 + help
71313 + By design some architectures do not allow for protecting memory
71314 + pages against execution or even if they do, Linux does not make
71315 + use of this feature. In practice this means that if a page is
71316 + readable (such as the stack or heap) it is also executable.
71317 +
71318 + There is a well known exploit technique that makes use of this
71319 + fact and a common programming mistake where an attacker can
71320 + introduce code of his choice somewhere in the attacked program's
71321 + memory (typically the stack or the heap) and then execute it.
71322 +
71323 + If the attacked program was running with different (typically
71324 + higher) privileges than that of the attacker, then he can elevate
71325 + his own privilege level (e.g. get a root shell, write to files for
71326 + which he does not have write access to, etc).
71327 +
71328 + Enabling this option will let you choose from various features
71329 + that prevent the injection and execution of 'foreign' code in
71330 + a program.
71331 +
71332 + This will also break programs that rely on the old behaviour and
71333 + expect that dynamically allocated memory via the malloc() family
71334 + of functions is executable (which it is not). Notable examples
71335 + are the XFree86 4.x server, the java runtime and wine.
71336 +
71337 +config PAX_PAGEEXEC
71338 + bool "Paging based non-executable pages"
71339 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
71340 + select S390_SWITCH_AMODE if S390
71341 + select S390_EXEC_PROTECT if S390
71342 + select ARCH_TRACK_EXEC_LIMIT if X86_32
71343 + help
71344 + This implementation is based on the paging feature of the CPU.
71345 + On i386 without hardware non-executable bit support there is a
71346 + variable but usually low performance impact, however on Intel's
71347 + P4 core based CPUs it is very high so you should not enable this
71348 + for kernels meant to be used on such CPUs.
71349 +
71350 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
71351 + with hardware non-executable bit support there is no performance
71352 + impact, on ppc the impact is negligible.
71353 +
71354 + Note that several architectures require various emulations due to
71355 + badly designed userland ABIs, this will cause a performance impact
71356 + but will disappear as soon as userland is fixed. For example, ppc
71357 + userland MUST have been built with secure-plt by a recent toolchain.
71358 +
71359 +config PAX_SEGMEXEC
71360 + bool "Segmentation based non-executable pages"
71361 + depends on PAX_NOEXEC && X86_32
71362 + help
71363 + This implementation is based on the segmentation feature of the
71364 + CPU and has a very small performance impact, however applications
71365 + will be limited to a 1.5 GB address space instead of the normal
71366 + 3 GB.
71367 +
71368 +config PAX_EMUTRAMP
71369 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
71370 + default y if PARISC
71371 + help
71372 + There are some programs and libraries that for one reason or
71373 + another attempt to execute special small code snippets from
71374 + non-executable memory pages. Most notable examples are the
71375 + signal handler return code generated by the kernel itself and
71376 + the GCC trampolines.
71377 +
71378 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
71379 + such programs will no longer work under your kernel.
71380 +
71381 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
71382 + utilities to enable trampoline emulation for the affected programs
71383 + yet still have the protection provided by the non-executable pages.
71384 +
71385 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
71386 + your system will not even boot.
71387 +
71388 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
71389 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
71390 + for the affected files.
71391 +
71392 + NOTE: enabling this feature *may* open up a loophole in the
71393 + protection provided by non-executable pages that an attacker
71394 + could abuse. Therefore the best solution is to not have any
71395 + files on your system that would require this option. This can
71396 + be achieved by not using libc5 (which relies on the kernel
71397 + signal handler return code) and not using or rewriting programs
71398 + that make use of the nested function implementation of GCC.
71399 + Skilled users can just fix GCC itself so that it implements
71400 + nested function calls in a way that does not interfere with PaX.
71401 +
71402 +config PAX_EMUSIGRT
71403 + bool "Automatically emulate sigreturn trampolines"
71404 + depends on PAX_EMUTRAMP && PARISC
71405 + default y
71406 + help
71407 + Enabling this option will have the kernel automatically detect
71408 + and emulate signal return trampolines executing on the stack
71409 + that would otherwise lead to task termination.
71410 +
71411 + This solution is intended as a temporary one for users with
71412 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
71413 + Modula-3 runtime, etc) or executables linked to such, basically
71414 + everything that does not specify its own SA_RESTORER function in
71415 + normal executable memory like glibc 2.1+ does.
71416 +
71417 + On parisc you MUST enable this option, otherwise your system will
71418 + not even boot.
71419 +
71420 + NOTE: this feature cannot be disabled on a per executable basis
71421 + and since it *does* open up a loophole in the protection provided
71422 + by non-executable pages, the best solution is to not have any
71423 + files on your system that would require this option.
71424 +
71425 +config PAX_MPROTECT
71426 + bool "Restrict mprotect()"
71427 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
71428 + help
71429 + Enabling this option will prevent programs from
71430 + - changing the executable status of memory pages that were
71431 + not originally created as executable,
71432 + - making read-only executable pages writable again,
71433 + - creating executable pages from anonymous memory,
71434 + - making read-only-after-relocations (RELRO) data pages writable again.
71435 +
71436 + You should say Y here to complete the protection provided by
71437 + the enforcement of non-executable pages.
71438 +
71439 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
71440 + this feature on a per file basis.
71441 +
71442 +config PAX_MPROTECT_COMPAT
71443 + bool "Use legacy/compat protection demoting (read help)"
71444 + depends on PAX_MPROTECT
71445 + default n
71446 + help
71447 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
71448 + by sending the proper error code to the application. For some broken
71449 + userland, this can cause problems with Python or other applications. The
71450 + current implementation however allows for applications like clamav to
71451 + detect if JIT compilation/execution is allowed and to fall back gracefully
71452 + to an interpreter-based mode if it does not. While we encourage everyone
71453 + to use the current implementation as-is and push upstream to fix broken
71454 + userland (note that the RWX logging option can assist with this), in some
71455 + environments this may not be possible. Having to disable MPROTECT
71456 + completely on certain binaries reduces the security benefit of PaX,
71457 + so this option is provided for those environments to revert to the old
71458 + behavior.
71459 +
71460 +config PAX_ELFRELOCS
71461 + bool "Allow ELF text relocations (read help)"
71462 + depends on PAX_MPROTECT
71463 + default n
71464 + help
71465 + Non-executable pages and mprotect() restrictions are effective
71466 + in preventing the introduction of new executable code into an
71467 + attacked task's address space. There remain only two venues
71468 + for this kind of attack: if the attacker can execute already
71469 + existing code in the attacked task then he can either have it
71470 + create and mmap() a file containing his code or have it mmap()
71471 + an already existing ELF library that does not have position
71472 + independent code in it and use mprotect() on it to make it
71473 + writable and copy his code there. While protecting against
71474 + the former approach is beyond PaX, the latter can be prevented
71475 + by having only PIC ELF libraries on one's system (which do not
71476 + need to relocate their code). If you are sure this is your case,
71477 + as is the case with all modern Linux distributions, then leave
71478 + this option disabled. You should say 'n' here.
71479 +
71480 +config PAX_ETEXECRELOCS
71481 + bool "Allow ELF ET_EXEC text relocations"
71482 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
71483 + select PAX_ELFRELOCS
71484 + default y
71485 + help
71486 + On some architectures there are incorrectly created applications
71487 + that require text relocations and would not work without enabling
71488 + this option. If you are an alpha, ia64 or parisc user, you should
71489 + enable this option and disable it once you have made sure that
71490 + none of your applications need it.
71491 +
71492 +config PAX_EMUPLT
71493 + bool "Automatically emulate ELF PLT"
71494 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
71495 + default y
71496 + help
71497 + Enabling this option will have the kernel automatically detect
71498 + and emulate the Procedure Linkage Table entries in ELF files.
71499 + On some architectures such entries are in writable memory, and
71500 + become non-executable leading to task termination. Therefore
71501 + it is mandatory that you enable this option on alpha, parisc,
71502 + sparc and sparc64, otherwise your system would not even boot.
71503 +
71504 + NOTE: this feature *does* open up a loophole in the protection
71505 + provided by the non-executable pages, therefore the proper
71506 + solution is to modify the toolchain to produce a PLT that does
71507 + not need to be writable.
71508 +
71509 +config PAX_DLRESOLVE
71510 + bool 'Emulate old glibc resolver stub'
71511 + depends on PAX_EMUPLT && SPARC
71512 + default n
71513 + help
71514 + This option is needed if userland has an old glibc (before 2.4)
71515 + that puts a 'save' instruction into the runtime generated resolver
71516 + stub that needs special emulation.
71517 +
71518 +config PAX_KERNEXEC
71519 + bool "Enforce non-executable kernel pages"
71520 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
71521 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
71522 + help
71523 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
71524 + that is, enabling this option will make it harder to inject
71525 + and execute 'foreign' code in kernel memory itself.
71526 +
71527 + Note that on x86_64 kernels there is a known regression when
71528 + this feature and KVM/VMX are both enabled in the host kernel.
71529 +
71530 +config PAX_KERNEXEC_MODULE_TEXT
71531 + int "Minimum amount of memory reserved for module code"
71532 + default "4"
71533 + depends on PAX_KERNEXEC && X86_32 && MODULES
71534 + help
71535 + Due to implementation details the kernel must reserve a fixed
71536 + amount of memory for module code at compile time that cannot be
71537 + changed at runtime. Here you can specify the minimum amount
71538 + in MB that will be reserved. Due to the same implementation
71539 + details this size will always be rounded up to the next 2/4 MB
71540 + boundary (depends on PAE) so the actually available memory for
71541 + module code will usually be more than this minimum.
71542 +
71543 + The default 4 MB should be enough for most users but if you have
71544 + an excessive number of modules (e.g., most distribution configs
71545 + compile many drivers as modules) or use huge modules such as
71546 + nvidia's kernel driver, you will need to adjust this amount.
71547 + A good rule of thumb is to look at your currently loaded kernel
71548 + modules and add up their sizes.
71549 +
71550 +endmenu
71551 +
71552 +menu "Address Space Layout Randomization"
71553 + depends on PAX
71554 +
71555 +config PAX_ASLR
71556 + bool "Address Space Layout Randomization"
71557 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
71558 + help
71559 + Many if not most exploit techniques rely on the knowledge of
71560 + certain addresses in the attacked program. The following options
71561 + will allow the kernel to apply a certain amount of randomization
71562 + to specific parts of the program thereby forcing an attacker to
71563 + guess them in most cases. Any failed guess will most likely crash
71564 + the attacked program which allows the kernel to detect such attempts
71565 + and react on them. PaX itself provides no reaction mechanisms,
71566 + instead it is strongly encouraged that you make use of Nergal's
71567 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
71568 + (http://www.grsecurity.net/) built-in crash detection features or
71569 + develop one yourself.
71570 +
71571 + By saying Y here you can choose to randomize the following areas:
71572 + - top of the task's kernel stack
71573 + - top of the task's userland stack
71574 + - base address for mmap() requests that do not specify one
71575 + (this includes all libraries)
71576 + - base address of the main executable
71577 +
71578 + It is strongly recommended to say Y here as address space layout
71579 + randomization has negligible impact on performance yet it provides
71580 + a very effective protection.
71581 +
71582 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
71583 + this feature on a per file basis.
71584 +
71585 +config PAX_RANDKSTACK
71586 + bool "Randomize kernel stack base"
71587 + depends on PAX_ASLR && X86_TSC && X86
71588 + help
71589 + By saying Y here the kernel will randomize every task's kernel
71590 + stack on every system call. This will not only force an attacker
71591 + to guess it but also prevent him from making use of possible
71592 + leaked information about it.
71593 +
71594 + Since the kernel stack is a rather scarce resource, randomization
71595 + may cause unexpected stack overflows, therefore you should very
71596 + carefully test your system. Note that once enabled in the kernel
71597 + configuration, this feature cannot be disabled on a per file basis.
71598 +
71599 +config PAX_RANDUSTACK
71600 + bool "Randomize user stack base"
71601 + depends on PAX_ASLR
71602 + help
71603 + By saying Y here the kernel will randomize every task's userland
71604 + stack. The randomization is done in two steps where the second
71605 + one may apply a big amount of shift to the top of the stack and
71606 + cause problems for programs that want to use lots of memory (more
71607 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
71608 + For this reason the second step can be controlled by 'chpax' or
71609 + 'paxctl' on a per file basis.
71610 +
71611 +config PAX_RANDMMAP
71612 + bool "Randomize mmap() base"
71613 + depends on PAX_ASLR
71614 + help
71615 + By saying Y here the kernel will use a randomized base address for
71616 + mmap() requests that do not specify one themselves. As a result
71617 + all dynamically loaded libraries will appear at random addresses
71618 + and therefore be harder to exploit by a technique where an attacker
71619 + attempts to execute library code for his purposes (e.g. spawn a
71620 + shell from an exploited program that is running at an elevated
71621 + privilege level).
71622 +
71623 + Furthermore, if a program is relinked as a dynamic ELF file, its
71624 + base address will be randomized as well, completing the full
71625 + randomization of the address space layout. Attacking such programs
71626 + becomes a guess game. You can find an example of doing this at
71627 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
71628 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
71629 +
71630 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
71631 + feature on a per file basis.
71632 +
71633 +endmenu
71634 +
71635 +menu "Miscellaneous hardening features"
71636 +
71637 +config PAX_MEMORY_SANITIZE
71638 + bool "Sanitize all freed memory"
71639 + help
71640 + By saying Y here the kernel will erase memory pages as soon as they
71641 + are freed. This in turn reduces the lifetime of data stored in the
71642 + pages, making it less likely that sensitive information such as
71643 + passwords, cryptographic secrets, etc stay in memory for too long.
71644 +
71645 + This is especially useful for programs whose runtime is short, long
71646 + lived processes and the kernel itself benefit from this as long as
71647 + they operate on whole memory pages and ensure timely freeing of pages
71648 + that may hold sensitive information.
71649 +
71650 + The tradeoff is performance impact, on a single CPU system kernel
71651 + compilation sees a 3% slowdown, other systems and workloads may vary
71652 + and you are advised to test this feature on your expected workload
71653 + before deploying it.
71654 +
71655 + Note that this feature does not protect data stored in live pages,
71656 + e.g., process memory swapped to disk may stay there for a long time.
71657 +
71658 +config PAX_MEMORY_STACKLEAK
71659 + bool "Sanitize kernel stack"
71660 + depends on X86
71661 + help
71662 + By saying Y here the kernel will erase the kernel stack before it
71663 + returns from a system call. This in turn reduces the information
71664 + that a kernel stack leak bug can reveal.
71665 +
71666 + Note that such a bug can still leak information that was put on
71667 + the stack by the current system call (the one eventually triggering
71668 + the bug) but traces of earlier system calls on the kernel stack
71669 + cannot leak anymore.
71670 +
71671 + The tradeoff is performance impact: on a single CPU system kernel
71672 + compilation sees a 1% slowdown, other systems and workloads may vary
71673 + and you are advised to test this feature on your expected workload
71674 + before deploying it.
71675 +
71676 + Note: full support for this feature requires gcc with plugin support
71677 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
71678 + is not supported). Using older gcc versions means that functions
71679 + with large enough stack frames may leave uninitialized memory behind
71680 + that may be exposed to a later syscall leaking the stack.
71681 +
71682 +config PAX_MEMORY_UDEREF
71683 + bool "Prevent invalid userland pointer dereference"
71684 + depends on X86 && !UML_X86 && !XEN
71685 + select PAX_PER_CPU_PGD if X86_64
71686 + help
71687 + By saying Y here the kernel will be prevented from dereferencing
71688 + userland pointers in contexts where the kernel expects only kernel
71689 + pointers. This is both a useful runtime debugging feature and a
71690 + security measure that prevents exploiting a class of kernel bugs.
71691 +
71692 + The tradeoff is that some virtualization solutions may experience
71693 + a huge slowdown and therefore you should not enable this feature
71694 + for kernels meant to run in such environments. Whether a given VM
71695 + solution is affected or not is best determined by simply trying it
71696 + out, the performance impact will be obvious right on boot as this
71697 + mechanism engages from very early on. A good rule of thumb is that
71698 + VMs running on CPUs without hardware virtualization support (i.e.,
71699 + the majority of IA-32 CPUs) will likely experience the slowdown.
71700 +
71701 +config PAX_REFCOUNT
71702 + bool "Prevent various kernel object reference counter overflows"
71703 + depends on GRKERNSEC && (X86 || SPARC64)
71704 + help
71705 + By saying Y here the kernel will detect and prevent overflowing
71706 + various (but not all) kinds of object reference counters. Such
71707 + overflows can normally occur due to bugs only and are often, if
71708 + not always, exploitable.
71709 +
71710 + The tradeoff is that data structures protected by an overflowed
71711 + refcount will never be freed and therefore will leak memory. Note
71712 + that this leak also happens even without this protection but in
71713 + that case the overflow can eventually trigger the freeing of the
71714 + data structure while it is still being used elsewhere, resulting
71715 + in the exploitable situation that this feature prevents.
71716 +
71717 + Since this has a negligible performance impact, you should enable
71718 + this feature.
71719 +
71720 +config PAX_USERCOPY
71721 + bool "Harden heap object copies between kernel and userland"
71722 + depends on X86 || PPC || SPARC || ARM
71723 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
71724 + help
71725 + By saying Y here the kernel will enforce the size of heap objects
71726 + when they are copied in either direction between the kernel and
71727 + userland, even if only a part of the heap object is copied.
71728 +
71729 + Specifically, this checking prevents information leaking from the
71730 + kernel heap during kernel to userland copies (if the kernel heap
71731 + object is otherwise fully initialized) and prevents kernel heap
71732 + overflows during userland to kernel copies.
71733 +
71734 + Note that the current implementation provides the strictest bounds
71735 + checks for the SLUB allocator.
71736 +
71737 + Enabling this option also enables per-slab cache protection against
71738 + data in a given cache being copied into/out of via userland
71739 + accessors. Though the whitelist of regions will be reduced over
71740 + time, it notably protects important data structures like task structs.
71741 +
71742 + If frame pointers are enabled on x86, this option will also restrict
71743 + copies into and out of the kernel stack to local variables within a
71744 + single frame.
71745 +
71746 + Since this has a negligible performance impact, you should enable
71747 + this feature.
71748 +
71749 +endmenu
71750 +
71751 +endmenu
71752 +
71753 config KEYS
71754 bool "Enable access key retention support"
71755 help
71756 @@ -167,7 +715,7 @@ config INTEL_TXT
71757 config LSM_MMAP_MIN_ADDR
71758 int "Low address space for LSM to protect from user allocation"
71759 depends on SECURITY && SECURITY_SELINUX
71760 - default 32768 if ARM
71761 + default 32768 if ALPHA || ARM || PARISC || SPARC32
71762 default 65536
71763 help
71764 This is the portion of low virtual memory which should be protected
71765 diff -urNp linux-3.0.4/security/keys/keyring.c linux-3.0.4/security/keys/keyring.c
71766 --- linux-3.0.4/security/keys/keyring.c 2011-07-21 22:17:23.000000000 -0400
71767 +++ linux-3.0.4/security/keys/keyring.c 2011-08-23 21:47:56.000000000 -0400
71768 @@ -215,15 +215,15 @@ static long keyring_read(const struct ke
71769 ret = -EFAULT;
71770
71771 for (loop = 0; loop < klist->nkeys; loop++) {
71772 + key_serial_t serial;
71773 key = klist->keys[loop];
71774 + serial = key->serial;
71775
71776 tmp = sizeof(key_serial_t);
71777 if (tmp > buflen)
71778 tmp = buflen;
71779
71780 - if (copy_to_user(buffer,
71781 - &key->serial,
71782 - tmp) != 0)
71783 + if (copy_to_user(buffer, &serial, tmp))
71784 goto error;
71785
71786 buflen -= tmp;
71787 diff -urNp linux-3.0.4/security/min_addr.c linux-3.0.4/security/min_addr.c
71788 --- linux-3.0.4/security/min_addr.c 2011-07-21 22:17:23.000000000 -0400
71789 +++ linux-3.0.4/security/min_addr.c 2011-08-23 21:48:14.000000000 -0400
71790 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
71791 */
71792 static void update_mmap_min_addr(void)
71793 {
71794 +#ifndef SPARC
71795 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
71796 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
71797 mmap_min_addr = dac_mmap_min_addr;
71798 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
71799 #else
71800 mmap_min_addr = dac_mmap_min_addr;
71801 #endif
71802 +#endif
71803 }
71804
71805 /*
71806 diff -urNp linux-3.0.4/security/security.c linux-3.0.4/security/security.c
71807 --- linux-3.0.4/security/security.c 2011-07-21 22:17:23.000000000 -0400
71808 +++ linux-3.0.4/security/security.c 2011-08-23 21:48:14.000000000 -0400
71809 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
71810 /* things that live in capability.c */
71811 extern void __init security_fixup_ops(struct security_operations *ops);
71812
71813 -static struct security_operations *security_ops;
71814 -static struct security_operations default_security_ops = {
71815 +static struct security_operations *security_ops __read_only;
71816 +static struct security_operations default_security_ops __read_only = {
71817 .name = "default",
71818 };
71819
71820 @@ -67,7 +67,9 @@ int __init security_init(void)
71821
71822 void reset_security_ops(void)
71823 {
71824 + pax_open_kernel();
71825 security_ops = &default_security_ops;
71826 + pax_close_kernel();
71827 }
71828
71829 /* Save user chosen LSM */
71830 diff -urNp linux-3.0.4/security/selinux/hooks.c linux-3.0.4/security/selinux/hooks.c
71831 --- linux-3.0.4/security/selinux/hooks.c 2011-07-21 22:17:23.000000000 -0400
71832 +++ linux-3.0.4/security/selinux/hooks.c 2011-08-23 21:48:14.000000000 -0400
71833 @@ -93,7 +93,6 @@
71834 #define NUM_SEL_MNT_OPTS 5
71835
71836 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
71837 -extern struct security_operations *security_ops;
71838
71839 /* SECMARK reference count */
71840 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
71841 @@ -5454,7 +5453,7 @@ static int selinux_key_getsecurity(struc
71842
71843 #endif
71844
71845 -static struct security_operations selinux_ops = {
71846 +static struct security_operations selinux_ops __read_only = {
71847 .name = "selinux",
71848
71849 .ptrace_access_check = selinux_ptrace_access_check,
71850 diff -urNp linux-3.0.4/security/selinux/include/xfrm.h linux-3.0.4/security/selinux/include/xfrm.h
71851 --- linux-3.0.4/security/selinux/include/xfrm.h 2011-07-21 22:17:23.000000000 -0400
71852 +++ linux-3.0.4/security/selinux/include/xfrm.h 2011-08-23 21:47:56.000000000 -0400
71853 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
71854
71855 static inline void selinux_xfrm_notify_policyload(void)
71856 {
71857 - atomic_inc(&flow_cache_genid);
71858 + atomic_inc_unchecked(&flow_cache_genid);
71859 }
71860 #else
71861 static inline int selinux_xfrm_enabled(void)
71862 diff -urNp linux-3.0.4/security/selinux/ss/services.c linux-3.0.4/security/selinux/ss/services.c
71863 --- linux-3.0.4/security/selinux/ss/services.c 2011-07-21 22:17:23.000000000 -0400
71864 +++ linux-3.0.4/security/selinux/ss/services.c 2011-08-23 21:48:14.000000000 -0400
71865 @@ -1814,6 +1814,8 @@ int security_load_policy(void *data, siz
71866 int rc = 0;
71867 struct policy_file file = { data, len }, *fp = &file;
71868
71869 + pax_track_stack();
71870 +
71871 if (!ss_initialized) {
71872 avtab_cache_init();
71873 rc = policydb_read(&policydb, fp);
71874 diff -urNp linux-3.0.4/security/smack/smack_lsm.c linux-3.0.4/security/smack/smack_lsm.c
71875 --- linux-3.0.4/security/smack/smack_lsm.c 2011-07-21 22:17:23.000000000 -0400
71876 +++ linux-3.0.4/security/smack/smack_lsm.c 2011-08-23 21:47:56.000000000 -0400
71877 @@ -3392,7 +3392,7 @@ static int smack_inode_getsecctx(struct
71878 return 0;
71879 }
71880
71881 -struct security_operations smack_ops = {
71882 +struct security_operations smack_ops __read_only = {
71883 .name = "smack",
71884
71885 .ptrace_access_check = smack_ptrace_access_check,
71886 diff -urNp linux-3.0.4/security/tomoyo/tomoyo.c linux-3.0.4/security/tomoyo/tomoyo.c
71887 --- linux-3.0.4/security/tomoyo/tomoyo.c 2011-07-21 22:17:23.000000000 -0400
71888 +++ linux-3.0.4/security/tomoyo/tomoyo.c 2011-08-23 21:47:56.000000000 -0400
71889 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
71890 * tomoyo_security_ops is a "struct security_operations" which is used for
71891 * registering TOMOYO.
71892 */
71893 -static struct security_operations tomoyo_security_ops = {
71894 +static struct security_operations tomoyo_security_ops __read_only = {
71895 .name = "tomoyo",
71896 .cred_alloc_blank = tomoyo_cred_alloc_blank,
71897 .cred_prepare = tomoyo_cred_prepare,
71898 diff -urNp linux-3.0.4/sound/aoa/codecs/onyx.c linux-3.0.4/sound/aoa/codecs/onyx.c
71899 --- linux-3.0.4/sound/aoa/codecs/onyx.c 2011-07-21 22:17:23.000000000 -0400
71900 +++ linux-3.0.4/sound/aoa/codecs/onyx.c 2011-08-23 21:47:56.000000000 -0400
71901 @@ -54,7 +54,7 @@ struct onyx {
71902 spdif_locked:1,
71903 analog_locked:1,
71904 original_mute:2;
71905 - int open_count;
71906 + local_t open_count;
71907 struct codec_info *codec_info;
71908
71909 /* mutex serializes concurrent access to the device
71910 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
71911 struct onyx *onyx = cii->codec_data;
71912
71913 mutex_lock(&onyx->mutex);
71914 - onyx->open_count++;
71915 + local_inc(&onyx->open_count);
71916 mutex_unlock(&onyx->mutex);
71917
71918 return 0;
71919 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
71920 struct onyx *onyx = cii->codec_data;
71921
71922 mutex_lock(&onyx->mutex);
71923 - onyx->open_count--;
71924 - if (!onyx->open_count)
71925 + if (local_dec_and_test(&onyx->open_count))
71926 onyx->spdif_locked = onyx->analog_locked = 0;
71927 mutex_unlock(&onyx->mutex);
71928
71929 diff -urNp linux-3.0.4/sound/aoa/codecs/onyx.h linux-3.0.4/sound/aoa/codecs/onyx.h
71930 --- linux-3.0.4/sound/aoa/codecs/onyx.h 2011-07-21 22:17:23.000000000 -0400
71931 +++ linux-3.0.4/sound/aoa/codecs/onyx.h 2011-08-23 21:47:56.000000000 -0400
71932 @@ -11,6 +11,7 @@
71933 #include <linux/i2c.h>
71934 #include <asm/pmac_low_i2c.h>
71935 #include <asm/prom.h>
71936 +#include <asm/local.h>
71937
71938 /* PCM3052 register definitions */
71939
71940 diff -urNp linux-3.0.4/sound/core/seq/seq_device.c linux-3.0.4/sound/core/seq/seq_device.c
71941 --- linux-3.0.4/sound/core/seq/seq_device.c 2011-07-21 22:17:23.000000000 -0400
71942 +++ linux-3.0.4/sound/core/seq/seq_device.c 2011-08-23 21:47:56.000000000 -0400
71943 @@ -63,7 +63,7 @@ struct ops_list {
71944 int argsize; /* argument size */
71945
71946 /* operators */
71947 - struct snd_seq_dev_ops ops;
71948 + struct snd_seq_dev_ops *ops;
71949
71950 /* registred devices */
71951 struct list_head dev_list; /* list of devices */
71952 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
71953
71954 mutex_lock(&ops->reg_mutex);
71955 /* copy driver operators */
71956 - ops->ops = *entry;
71957 + ops->ops = entry;
71958 ops->driver |= DRIVER_LOADED;
71959 ops->argsize = argsize;
71960
71961 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
71962 dev->name, ops->id, ops->argsize, dev->argsize);
71963 return -EINVAL;
71964 }
71965 - if (ops->ops.init_device(dev) >= 0) {
71966 + if (ops->ops->init_device(dev) >= 0) {
71967 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
71968 ops->num_init_devices++;
71969 } else {
71970 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
71971 dev->name, ops->id, ops->argsize, dev->argsize);
71972 return -EINVAL;
71973 }
71974 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
71975 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
71976 dev->status = SNDRV_SEQ_DEVICE_FREE;
71977 dev->driver_data = NULL;
71978 ops->num_init_devices--;
71979 diff -urNp linux-3.0.4/sound/drivers/mts64.c linux-3.0.4/sound/drivers/mts64.c
71980 --- linux-3.0.4/sound/drivers/mts64.c 2011-07-21 22:17:23.000000000 -0400
71981 +++ linux-3.0.4/sound/drivers/mts64.c 2011-08-23 21:47:56.000000000 -0400
71982 @@ -28,6 +28,7 @@
71983 #include <sound/initval.h>
71984 #include <sound/rawmidi.h>
71985 #include <sound/control.h>
71986 +#include <asm/local.h>
71987
71988 #define CARD_NAME "Miditerminal 4140"
71989 #define DRIVER_NAME "MTS64"
71990 @@ -66,7 +67,7 @@ struct mts64 {
71991 struct pardevice *pardev;
71992 int pardev_claimed;
71993
71994 - int open_count;
71995 + local_t open_count;
71996 int current_midi_output_port;
71997 int current_midi_input_port;
71998 u8 mode[MTS64_NUM_INPUT_PORTS];
71999 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
72000 {
72001 struct mts64 *mts = substream->rmidi->private_data;
72002
72003 - if (mts->open_count == 0) {
72004 + if (local_read(&mts->open_count) == 0) {
72005 /* We don't need a spinlock here, because this is just called
72006 if the device has not been opened before.
72007 So there aren't any IRQs from the device */
72008 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
72009
72010 msleep(50);
72011 }
72012 - ++(mts->open_count);
72013 + local_inc(&mts->open_count);
72014
72015 return 0;
72016 }
72017 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
72018 struct mts64 *mts = substream->rmidi->private_data;
72019 unsigned long flags;
72020
72021 - --(mts->open_count);
72022 - if (mts->open_count == 0) {
72023 + if (local_dec_return(&mts->open_count) == 0) {
72024 /* We need the spinlock_irqsave here because we can still
72025 have IRQs at this point */
72026 spin_lock_irqsave(&mts->lock, flags);
72027 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
72028
72029 msleep(500);
72030
72031 - } else if (mts->open_count < 0)
72032 - mts->open_count = 0;
72033 + } else if (local_read(&mts->open_count) < 0)
72034 + local_set(&mts->open_count, 0);
72035
72036 return 0;
72037 }
72038 diff -urNp linux-3.0.4/sound/drivers/opl4/opl4_lib.c linux-3.0.4/sound/drivers/opl4/opl4_lib.c
72039 --- linux-3.0.4/sound/drivers/opl4/opl4_lib.c 2011-07-21 22:17:23.000000000 -0400
72040 +++ linux-3.0.4/sound/drivers/opl4/opl4_lib.c 2011-08-23 21:47:56.000000000 -0400
72041 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
72042 MODULE_DESCRIPTION("OPL4 driver");
72043 MODULE_LICENSE("GPL");
72044
72045 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
72046 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
72047 {
72048 int timeout = 10;
72049 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
72050 diff -urNp linux-3.0.4/sound/drivers/portman2x4.c linux-3.0.4/sound/drivers/portman2x4.c
72051 --- linux-3.0.4/sound/drivers/portman2x4.c 2011-07-21 22:17:23.000000000 -0400
72052 +++ linux-3.0.4/sound/drivers/portman2x4.c 2011-08-23 21:47:56.000000000 -0400
72053 @@ -47,6 +47,7 @@
72054 #include <sound/initval.h>
72055 #include <sound/rawmidi.h>
72056 #include <sound/control.h>
72057 +#include <asm/local.h>
72058
72059 #define CARD_NAME "Portman 2x4"
72060 #define DRIVER_NAME "portman"
72061 @@ -84,7 +85,7 @@ struct portman {
72062 struct pardevice *pardev;
72063 int pardev_claimed;
72064
72065 - int open_count;
72066 + local_t open_count;
72067 int mode[PORTMAN_NUM_INPUT_PORTS];
72068 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
72069 };
72070 diff -urNp linux-3.0.4/sound/firewire/amdtp.c linux-3.0.4/sound/firewire/amdtp.c
72071 --- linux-3.0.4/sound/firewire/amdtp.c 2011-07-21 22:17:23.000000000 -0400
72072 +++ linux-3.0.4/sound/firewire/amdtp.c 2011-08-23 21:47:56.000000000 -0400
72073 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
72074 ptr = s->pcm_buffer_pointer + data_blocks;
72075 if (ptr >= pcm->runtime->buffer_size)
72076 ptr -= pcm->runtime->buffer_size;
72077 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
72078 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
72079
72080 s->pcm_period_pointer += data_blocks;
72081 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
72082 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
72083 */
72084 void amdtp_out_stream_update(struct amdtp_out_stream *s)
72085 {
72086 - ACCESS_ONCE(s->source_node_id_field) =
72087 + ACCESS_ONCE_RW(s->source_node_id_field) =
72088 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
72089 }
72090 EXPORT_SYMBOL(amdtp_out_stream_update);
72091 diff -urNp linux-3.0.4/sound/firewire/amdtp.h linux-3.0.4/sound/firewire/amdtp.h
72092 --- linux-3.0.4/sound/firewire/amdtp.h 2011-07-21 22:17:23.000000000 -0400
72093 +++ linux-3.0.4/sound/firewire/amdtp.h 2011-08-23 21:47:56.000000000 -0400
72094 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
72095 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
72096 struct snd_pcm_substream *pcm)
72097 {
72098 - ACCESS_ONCE(s->pcm) = pcm;
72099 + ACCESS_ONCE_RW(s->pcm) = pcm;
72100 }
72101
72102 /**
72103 diff -urNp linux-3.0.4/sound/firewire/isight.c linux-3.0.4/sound/firewire/isight.c
72104 --- linux-3.0.4/sound/firewire/isight.c 2011-07-21 22:17:23.000000000 -0400
72105 +++ linux-3.0.4/sound/firewire/isight.c 2011-08-23 21:47:56.000000000 -0400
72106 @@ -97,7 +97,7 @@ static void isight_update_pointers(struc
72107 ptr += count;
72108 if (ptr >= runtime->buffer_size)
72109 ptr -= runtime->buffer_size;
72110 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
72111 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
72112
72113 isight->period_counter += count;
72114 if (isight->period_counter >= runtime->period_size) {
72115 @@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_p
72116 if (err < 0)
72117 return err;
72118
72119 - ACCESS_ONCE(isight->pcm_active) = true;
72120 + ACCESS_ONCE_RW(isight->pcm_active) = true;
72121
72122 return 0;
72123 }
72124 @@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm
72125 {
72126 struct isight *isight = substream->private_data;
72127
72128 - ACCESS_ONCE(isight->pcm_active) = false;
72129 + ACCESS_ONCE_RW(isight->pcm_active) = false;
72130
72131 mutex_lock(&isight->mutex);
72132 isight_stop_streaming(isight);
72133 @@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm
72134
72135 switch (cmd) {
72136 case SNDRV_PCM_TRIGGER_START:
72137 - ACCESS_ONCE(isight->pcm_running) = true;
72138 + ACCESS_ONCE_RW(isight->pcm_running) = true;
72139 break;
72140 case SNDRV_PCM_TRIGGER_STOP:
72141 - ACCESS_ONCE(isight->pcm_running) = false;
72142 + ACCESS_ONCE_RW(isight->pcm_running) = false;
72143 break;
72144 default:
72145 return -EINVAL;
72146 diff -urNp linux-3.0.4/sound/isa/cmi8330.c linux-3.0.4/sound/isa/cmi8330.c
72147 --- linux-3.0.4/sound/isa/cmi8330.c 2011-07-21 22:17:23.000000000 -0400
72148 +++ linux-3.0.4/sound/isa/cmi8330.c 2011-08-23 21:47:56.000000000 -0400
72149 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
72150
72151 struct snd_pcm *pcm;
72152 struct snd_cmi8330_stream {
72153 - struct snd_pcm_ops ops;
72154 + snd_pcm_ops_no_const ops;
72155 snd_pcm_open_callback_t open;
72156 void *private_data; /* sb or wss */
72157 } streams[2];
72158 diff -urNp linux-3.0.4/sound/oss/sb_audio.c linux-3.0.4/sound/oss/sb_audio.c
72159 --- linux-3.0.4/sound/oss/sb_audio.c 2011-07-21 22:17:23.000000000 -0400
72160 +++ linux-3.0.4/sound/oss/sb_audio.c 2011-08-23 21:47:56.000000000 -0400
72161 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
72162 buf16 = (signed short *)(localbuf + localoffs);
72163 while (c)
72164 {
72165 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
72166 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
72167 if (copy_from_user(lbuf8,
72168 userbuf+useroffs + p,
72169 locallen))
72170 diff -urNp linux-3.0.4/sound/oss/swarm_cs4297a.c linux-3.0.4/sound/oss/swarm_cs4297a.c
72171 --- linux-3.0.4/sound/oss/swarm_cs4297a.c 2011-07-21 22:17:23.000000000 -0400
72172 +++ linux-3.0.4/sound/oss/swarm_cs4297a.c 2011-08-23 21:47:56.000000000 -0400
72173 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
72174 {
72175 struct cs4297a_state *s;
72176 u32 pwr, id;
72177 - mm_segment_t fs;
72178 int rval;
72179 #ifndef CONFIG_BCM_CS4297A_CSWARM
72180 u64 cfg;
72181 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
72182 if (!rval) {
72183 char *sb1250_duart_present;
72184
72185 +#if 0
72186 + mm_segment_t fs;
72187 fs = get_fs();
72188 set_fs(KERNEL_DS);
72189 -#if 0
72190 val = SOUND_MASK_LINE;
72191 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
72192 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
72193 val = initvol[i].vol;
72194 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
72195 }
72196 + set_fs(fs);
72197 // cs4297a_write_ac97(s, 0x18, 0x0808);
72198 #else
72199 // cs4297a_write_ac97(s, 0x5e, 0x180);
72200 cs4297a_write_ac97(s, 0x02, 0x0808);
72201 cs4297a_write_ac97(s, 0x18, 0x0808);
72202 #endif
72203 - set_fs(fs);
72204
72205 list_add(&s->list, &cs4297a_devs);
72206
72207 diff -urNp linux-3.0.4/sound/pci/hda/hda_codec.h linux-3.0.4/sound/pci/hda/hda_codec.h
72208 --- linux-3.0.4/sound/pci/hda/hda_codec.h 2011-07-21 22:17:23.000000000 -0400
72209 +++ linux-3.0.4/sound/pci/hda/hda_codec.h 2011-08-23 21:47:56.000000000 -0400
72210 @@ -615,7 +615,7 @@ struct hda_bus_ops {
72211 /* notify power-up/down from codec to controller */
72212 void (*pm_notify)(struct hda_bus *bus);
72213 #endif
72214 -};
72215 +} __no_const;
72216
72217 /* template to pass to the bus constructor */
72218 struct hda_bus_template {
72219 @@ -713,6 +713,7 @@ struct hda_codec_ops {
72220 #endif
72221 void (*reboot_notify)(struct hda_codec *codec);
72222 };
72223 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
72224
72225 /* record for amp information cache */
72226 struct hda_cache_head {
72227 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
72228 struct snd_pcm_substream *substream);
72229 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
72230 struct snd_pcm_substream *substream);
72231 -};
72232 +} __no_const;
72233
72234 /* PCM information for each substream */
72235 struct hda_pcm_stream {
72236 @@ -801,7 +802,7 @@ struct hda_codec {
72237 const char *modelname; /* model name for preset */
72238
72239 /* set by patch */
72240 - struct hda_codec_ops patch_ops;
72241 + hda_codec_ops_no_const patch_ops;
72242
72243 /* PCM to create, set by patch_ops.build_pcms callback */
72244 unsigned int num_pcms;
72245 diff -urNp linux-3.0.4/sound/pci/ice1712/ice1712.h linux-3.0.4/sound/pci/ice1712/ice1712.h
72246 --- linux-3.0.4/sound/pci/ice1712/ice1712.h 2011-07-21 22:17:23.000000000 -0400
72247 +++ linux-3.0.4/sound/pci/ice1712/ice1712.h 2011-08-23 21:47:56.000000000 -0400
72248 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
72249 unsigned int mask_flags; /* total mask bits */
72250 struct snd_akm4xxx_ops {
72251 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
72252 - } ops;
72253 + } __no_const ops;
72254 };
72255
72256 struct snd_ice1712_spdif {
72257 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
72258 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
72259 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
72260 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
72261 - } ops;
72262 + } __no_const ops;
72263 };
72264
72265
72266 diff -urNp linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c
72267 --- linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c 2011-07-21 22:17:23.000000000 -0400
72268 +++ linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c 2011-08-23 21:47:56.000000000 -0400
72269 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
72270 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
72271 break;
72272 }
72273 - if (atomic_read(&chip->interrupt_sleep_count)) {
72274 - atomic_set(&chip->interrupt_sleep_count, 0);
72275 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
72276 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72277 wake_up(&chip->interrupt_sleep);
72278 }
72279 __end:
72280 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
72281 continue;
72282 init_waitqueue_entry(&wait, current);
72283 add_wait_queue(&chip->interrupt_sleep, &wait);
72284 - atomic_inc(&chip->interrupt_sleep_count);
72285 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
72286 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
72287 remove_wait_queue(&chip->interrupt_sleep, &wait);
72288 }
72289 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
72290 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
72291 spin_unlock(&chip->reg_lock);
72292
72293 - if (atomic_read(&chip->interrupt_sleep_count)) {
72294 - atomic_set(&chip->interrupt_sleep_count, 0);
72295 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
72296 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72297 wake_up(&chip->interrupt_sleep);
72298 }
72299 }
72300 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
72301 spin_lock_init(&chip->reg_lock);
72302 spin_lock_init(&chip->voice_lock);
72303 init_waitqueue_head(&chip->interrupt_sleep);
72304 - atomic_set(&chip->interrupt_sleep_count, 0);
72305 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72306 chip->card = card;
72307 chip->pci = pci;
72308 chip->irq = -1;
72309 diff -urNp linux-3.0.4/sound/soc/soc-core.c linux-3.0.4/sound/soc/soc-core.c
72310 --- linux-3.0.4/sound/soc/soc-core.c 2011-08-23 21:44:40.000000000 -0400
72311 +++ linux-3.0.4/sound/soc/soc-core.c 2011-08-23 21:47:56.000000000 -0400
72312 @@ -1021,7 +1021,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
72313 }
72314
72315 /* ASoC PCM operations */
72316 -static struct snd_pcm_ops soc_pcm_ops = {
72317 +static snd_pcm_ops_no_const soc_pcm_ops = {
72318 .open = soc_pcm_open,
72319 .close = soc_codec_close,
72320 .hw_params = soc_pcm_hw_params,
72321 @@ -2128,6 +2128,7 @@ static int soc_new_pcm(struct snd_soc_pc
72322 rtd->pcm = pcm;
72323 pcm->private_data = rtd;
72324 if (platform->driver->ops) {
72325 + /* this whole logic is broken... */
72326 soc_pcm_ops.mmap = platform->driver->ops->mmap;
72327 soc_pcm_ops.pointer = platform->driver->ops->pointer;
72328 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
72329 diff -urNp linux-3.0.4/sound/usb/card.h linux-3.0.4/sound/usb/card.h
72330 --- linux-3.0.4/sound/usb/card.h 2011-07-21 22:17:23.000000000 -0400
72331 +++ linux-3.0.4/sound/usb/card.h 2011-08-23 21:47:56.000000000 -0400
72332 @@ -44,6 +44,7 @@ struct snd_urb_ops {
72333 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
72334 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
72335 };
72336 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
72337
72338 struct snd_usb_substream {
72339 struct snd_usb_stream *stream;
72340 @@ -93,7 +94,7 @@ struct snd_usb_substream {
72341 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
72342 spinlock_t lock;
72343
72344 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
72345 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
72346 };
72347
72348 struct snd_usb_stream {
72349 diff -urNp linux-3.0.4/tools/gcc/constify_plugin.c linux-3.0.4/tools/gcc/constify_plugin.c
72350 --- linux-3.0.4/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
72351 +++ linux-3.0.4/tools/gcc/constify_plugin.c 2011-08-29 22:01:36.000000000 -0400
72352 @@ -0,0 +1,289 @@
72353 +/*
72354 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
72355 + * Licensed under the GPL v2, or (at your option) v3
72356 + *
72357 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
72358 + *
72359 + * Usage:
72360 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
72361 + * $ gcc -fplugin=constify_plugin.so test.c -O2
72362 + */
72363 +
72364 +#include "gcc-plugin.h"
72365 +#include "config.h"
72366 +#include "system.h"
72367 +#include "coretypes.h"
72368 +#include "tree.h"
72369 +#include "tree-pass.h"
72370 +#include "intl.h"
72371 +#include "plugin-version.h"
72372 +#include "tm.h"
72373 +#include "toplev.h"
72374 +#include "function.h"
72375 +#include "tree-flow.h"
72376 +#include "plugin.h"
72377 +#include "diagnostic.h"
72378 +//#include "c-tree.h"
72379 +
72380 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
72381 +
72382 +int plugin_is_GPL_compatible;
72383 +
72384 +static struct plugin_info const_plugin_info = {
72385 + .version = "20110826",
72386 + .help = "no-constify\tturn off constification\n",
72387 +};
72388 +
72389 +static void constify_type(tree type);
72390 +static bool walk_struct(tree node);
72391 +
72392 +static tree deconstify_type(tree old_type)
72393 +{
72394 + tree new_type, field;
72395 +
72396 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
72397 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
72398 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
72399 + DECL_FIELD_CONTEXT(field) = new_type;
72400 + TYPE_READONLY(new_type) = 0;
72401 + C_TYPE_FIELDS_READONLY(new_type) = 0;
72402 + return new_type;
72403 +}
72404 +
72405 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
72406 +{
72407 + tree type;
72408 +
72409 + *no_add_attrs = true;
72410 + if (TREE_CODE(*node) == FUNCTION_DECL) {
72411 + error("%qE attribute does not apply to functions", name);
72412 + return NULL_TREE;
72413 + }
72414 +
72415 + if (TREE_CODE(*node) == VAR_DECL) {
72416 + error("%qE attribute does not apply to variables", name);
72417 + return NULL_TREE;
72418 + }
72419 +
72420 + if (TYPE_P(*node)) {
72421 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
72422 + *no_add_attrs = false;
72423 + else
72424 + error("%qE attribute applies to struct and union types only", name);
72425 + return NULL_TREE;
72426 + }
72427 +
72428 + type = TREE_TYPE(*node);
72429 +
72430 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
72431 + error("%qE attribute applies to struct and union types only", name);
72432 + return NULL_TREE;
72433 + }
72434 +
72435 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
72436 + error("%qE attribute is already applied to the type", name);
72437 + return NULL_TREE;
72438 + }
72439 +
72440 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
72441 + error("%qE attribute used on type that is not constified", name);
72442 + return NULL_TREE;
72443 + }
72444 +
72445 + if (TREE_CODE(*node) == TYPE_DECL) {
72446 + TREE_TYPE(*node) = deconstify_type(type);
72447 + TREE_READONLY(*node) = 0;
72448 + return NULL_TREE;
72449 + }
72450 +
72451 + return NULL_TREE;
72452 +}
72453 +
72454 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
72455 +{
72456 + *no_add_attrs = true;
72457 + if (!TYPE_P(*node)) {
72458 + error("%qE attribute applies to types only", name);
72459 + return NULL_TREE;
72460 + }
72461 +
72462 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
72463 + error("%qE attribute applies to struct and union types only", name);
72464 + return NULL_TREE;
72465 + }
72466 +
72467 + *no_add_attrs = false;
72468 + constify_type(*node);
72469 + return NULL_TREE;
72470 +}
72471 +
72472 +static struct attribute_spec no_const_attr = {
72473 + .name = "no_const",
72474 + .min_length = 0,
72475 + .max_length = 0,
72476 + .decl_required = false,
72477 + .type_required = false,
72478 + .function_type_required = false,
72479 + .handler = handle_no_const_attribute
72480 +};
72481 +
72482 +static struct attribute_spec do_const_attr = {
72483 + .name = "do_const",
72484 + .min_length = 0,
72485 + .max_length = 0,
72486 + .decl_required = false,
72487 + .type_required = false,
72488 + .function_type_required = false,
72489 + .handler = handle_do_const_attribute
72490 +};
72491 +
72492 +static void register_attributes(void *event_data, void *data)
72493 +{
72494 + register_attribute(&no_const_attr);
72495 + register_attribute(&do_const_attr);
72496 +}
72497 +
72498 +static void constify_type(tree type)
72499 +{
72500 + TYPE_READONLY(type) = 1;
72501 + C_TYPE_FIELDS_READONLY(type) = 1;
72502 +}
72503 +
72504 +static bool is_fptr(tree field)
72505 +{
72506 + tree ptr = TREE_TYPE(field);
72507 +
72508 + if (TREE_CODE(ptr) != POINTER_TYPE)
72509 + return false;
72510 +
72511 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
72512 +}
72513 +
72514 +static bool walk_struct(tree node)
72515 +{
72516 + tree field;
72517 +
72518 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
72519 + return false;
72520 +
72521 + if (TYPE_FIELDS(node) == NULL_TREE)
72522 + return false;
72523 +
72524 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
72525 + tree type = TREE_TYPE(field);
72526 + enum tree_code code = TREE_CODE(type);
72527 + if (code == RECORD_TYPE || code == UNION_TYPE) {
72528 + if (!(walk_struct(type)))
72529 + return false;
72530 + } else if (!is_fptr(field) && !TREE_READONLY(field))
72531 + return false;
72532 + }
72533 + return true;
72534 +}
72535 +
72536 +static void finish_type(void *event_data, void *data)
72537 +{
72538 + tree type = (tree)event_data;
72539 +
72540 + if (type == NULL_TREE)
72541 + return;
72542 +
72543 + if (TYPE_READONLY(type))
72544 + return;
72545 +
72546 + if (walk_struct(type))
72547 + constify_type(type);
72548 +}
72549 +
72550 +static unsigned int check_local_variables(void);
72551 +
72552 +struct gimple_opt_pass pass_local_variable = {
72553 + {
72554 + .type = GIMPLE_PASS,
72555 + .name = "check_local_variables",
72556 + .gate = NULL,
72557 + .execute = check_local_variables,
72558 + .sub = NULL,
72559 + .next = NULL,
72560 + .static_pass_number = 0,
72561 + .tv_id = TV_NONE,
72562 + .properties_required = 0,
72563 + .properties_provided = 0,
72564 + .properties_destroyed = 0,
72565 + .todo_flags_start = 0,
72566 + .todo_flags_finish = 0
72567 + }
72568 +};
72569 +
72570 +static unsigned int check_local_variables(void)
72571 +{
72572 + tree var;
72573 + referenced_var_iterator rvi;
72574 +
72575 +#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
72576 + FOR_EACH_REFERENCED_VAR(var, rvi) {
72577 +#else
72578 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
72579 +#endif
72580 + tree type = TREE_TYPE(var);
72581 +
72582 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
72583 + continue;
72584 +
72585 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
72586 + continue;
72587 +
72588 + if (!TYPE_READONLY(type))
72589 + continue;
72590 +
72591 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
72592 +// continue;
72593 +
72594 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
72595 +// continue;
72596 +
72597 + if (walk_struct(type)) {
72598 + error("constified variable %qE cannot be local", var);
72599 + return 1;
72600 + }
72601 + }
72602 + return 0;
72603 +}
72604 +
72605 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
72606 +{
72607 + const char * const plugin_name = plugin_info->base_name;
72608 + const int argc = plugin_info->argc;
72609 + const struct plugin_argument * const argv = plugin_info->argv;
72610 + int i;
72611 + bool constify = true;
72612 +
72613 + struct register_pass_info local_variable_pass_info = {
72614 + .pass = &pass_local_variable.pass,
72615 + .reference_pass_name = "*referenced_vars",
72616 + .ref_pass_instance_number = 0,
72617 + .pos_op = PASS_POS_INSERT_AFTER
72618 + };
72619 +
72620 + if (!plugin_default_version_check(version, &gcc_version)) {
72621 + error(G_("incompatible gcc/plugin versions"));
72622 + return 1;
72623 + }
72624 +
72625 + for (i = 0; i < argc; ++i) {
72626 + if (!(strcmp(argv[i].key, "no-constify"))) {
72627 + constify = false;
72628 + continue;
72629 + }
72630 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72631 + }
72632 +
72633 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
72634 + if (constify) {
72635 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
72636 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
72637 + }
72638 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
72639 +
72640 + return 0;
72641 +}
72642 diff -urNp linux-3.0.4/tools/gcc/Makefile linux-3.0.4/tools/gcc/Makefile
72643 --- linux-3.0.4/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
72644 +++ linux-3.0.4/tools/gcc/Makefile 2011-08-23 21:47:56.000000000 -0400
72645 @@ -0,0 +1,12 @@
72646 +#CC := gcc
72647 +#PLUGIN_SOURCE_FILES := pax_plugin.c
72648 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
72649 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
72650 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
72651 +
72652 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
72653 +
72654 +hostlibs-y := stackleak_plugin.so constify_plugin.so
72655 +always := $(hostlibs-y)
72656 +stackleak_plugin-objs := stackleak_plugin.o
72657 +constify_plugin-objs := constify_plugin.o
72658 diff -urNp linux-3.0.4/tools/gcc/stackleak_plugin.c linux-3.0.4/tools/gcc/stackleak_plugin.c
72659 --- linux-3.0.4/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
72660 +++ linux-3.0.4/tools/gcc/stackleak_plugin.c 2011-08-23 21:47:56.000000000 -0400
72661 @@ -0,0 +1,243 @@
72662 +/*
72663 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
72664 + * Licensed under the GPL v2
72665 + *
72666 + * Note: the choice of the license means that the compilation process is
72667 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
72668 + * but for the kernel it doesn't matter since it doesn't link against
72669 + * any of the gcc libraries
72670 + *
72671 + * gcc plugin to help implement various PaX features
72672 + *
72673 + * - track lowest stack pointer
72674 + *
72675 + * TODO:
72676 + * - initialize all local variables
72677 + *
72678 + * BUGS:
72679 + * - cloned functions are instrumented twice
72680 + */
72681 +#include "gcc-plugin.h"
72682 +#include "config.h"
72683 +#include "system.h"
72684 +#include "coretypes.h"
72685 +#include "tree.h"
72686 +#include "tree-pass.h"
72687 +#include "intl.h"
72688 +#include "plugin-version.h"
72689 +#include "tm.h"
72690 +#include "toplev.h"
72691 +#include "basic-block.h"
72692 +#include "gimple.h"
72693 +//#include "expr.h" where are you...
72694 +#include "diagnostic.h"
72695 +#include "rtl.h"
72696 +#include "emit-rtl.h"
72697 +#include "function.h"
72698 +
72699 +int plugin_is_GPL_compatible;
72700 +
72701 +static int track_frame_size = -1;
72702 +static const char track_function[] = "pax_track_stack";
72703 +static bool init_locals;
72704 +
72705 +static struct plugin_info stackleak_plugin_info = {
72706 + .version = "201106030000",
72707 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
72708 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
72709 +};
72710 +
72711 +static bool gate_stackleak_track_stack(void);
72712 +static unsigned int execute_stackleak_tree_instrument(void);
72713 +static unsigned int execute_stackleak_final(void);
72714 +
72715 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
72716 + .pass = {
72717 + .type = GIMPLE_PASS,
72718 + .name = "stackleak_tree_instrument",
72719 + .gate = gate_stackleak_track_stack,
72720 + .execute = execute_stackleak_tree_instrument,
72721 + .sub = NULL,
72722 + .next = NULL,
72723 + .static_pass_number = 0,
72724 + .tv_id = TV_NONE,
72725 + .properties_required = PROP_gimple_leh | PROP_cfg,
72726 + .properties_provided = 0,
72727 + .properties_destroyed = 0,
72728 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
72729 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
72730 + }
72731 +};
72732 +
72733 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
72734 + .pass = {
72735 + .type = RTL_PASS,
72736 + .name = "stackleak_final",
72737 + .gate = gate_stackleak_track_stack,
72738 + .execute = execute_stackleak_final,
72739 + .sub = NULL,
72740 + .next = NULL,
72741 + .static_pass_number = 0,
72742 + .tv_id = TV_NONE,
72743 + .properties_required = 0,
72744 + .properties_provided = 0,
72745 + .properties_destroyed = 0,
72746 + .todo_flags_start = 0,
72747 + .todo_flags_finish = 0
72748 + }
72749 +};
72750 +
72751 +static bool gate_stackleak_track_stack(void)
72752 +{
72753 + return track_frame_size >= 0;
72754 +}
72755 +
72756 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
72757 +{
72758 + gimple call;
72759 + tree decl, type;
72760 +
72761 + // insert call to void pax_track_stack(void)
72762 + type = build_function_type_list(void_type_node, NULL_TREE);
72763 + decl = build_fn_decl(track_function, type);
72764 + DECL_ASSEMBLER_NAME(decl); // for LTO
72765 + call = gimple_build_call(decl, 0);
72766 + if (before)
72767 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
72768 + else
72769 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
72770 +}
72771 +
72772 +static unsigned int execute_stackleak_tree_instrument(void)
72773 +{
72774 + basic_block bb;
72775 + gimple_stmt_iterator gsi;
72776 +
72777 + // 1. loop through BBs and GIMPLE statements
72778 + FOR_EACH_BB(bb) {
72779 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
72780 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
72781 + tree decl;
72782 + gimple stmt = gsi_stmt(gsi);
72783 +
72784 + if (!is_gimple_call(stmt))
72785 + continue;
72786 + decl = gimple_call_fndecl(stmt);
72787 + if (!decl)
72788 + continue;
72789 + if (TREE_CODE(decl) != FUNCTION_DECL)
72790 + continue;
72791 + if (!DECL_BUILT_IN(decl))
72792 + continue;
72793 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
72794 + continue;
72795 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
72796 + continue;
72797 +
72798 + // 2. insert track call after each __builtin_alloca call
72799 + stackleak_add_instrumentation(&gsi, false);
72800 +// print_node(stderr, "pax", decl, 4);
72801 + }
72802 + }
72803 +
72804 + // 3. insert track call at the beginning
72805 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
72806 + gsi = gsi_start_bb(bb);
72807 + stackleak_add_instrumentation(&gsi, true);
72808 +
72809 + return 0;
72810 +}
72811 +
72812 +static unsigned int execute_stackleak_final(void)
72813 +{
72814 + rtx insn;
72815 +
72816 + if (cfun->calls_alloca)
72817 + return 0;
72818 +
72819 + // 1. find pax_track_stack calls
72820 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
72821 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
72822 + rtx body;
72823 +
72824 + if (!CALL_P(insn))
72825 + continue;
72826 + body = PATTERN(insn);
72827 + if (GET_CODE(body) != CALL)
72828 + continue;
72829 + body = XEXP(body, 0);
72830 + if (GET_CODE(body) != MEM)
72831 + continue;
72832 + body = XEXP(body, 0);
72833 + if (GET_CODE(body) != SYMBOL_REF)
72834 + continue;
72835 + if (strcmp(XSTR(body, 0), track_function))
72836 + continue;
72837 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72838 + // 2. delete call if function frame is not big enough
72839 + if (get_frame_size() >= track_frame_size)
72840 + continue;
72841 + delete_insn_and_edges(insn);
72842 + }
72843 +
72844 +// print_simple_rtl(stderr, get_insns());
72845 +// print_rtl(stderr, get_insns());
72846 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72847 +
72848 + return 0;
72849 +}
72850 +
72851 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
72852 +{
72853 + const char * const plugin_name = plugin_info->base_name;
72854 + const int argc = plugin_info->argc;
72855 + const struct plugin_argument * const argv = plugin_info->argv;
72856 + int i;
72857 + struct register_pass_info stackleak_tree_instrument_pass_info = {
72858 + .pass = &stackleak_tree_instrument_pass.pass,
72859 +// .reference_pass_name = "tree_profile",
72860 + .reference_pass_name = "optimized",
72861 + .ref_pass_instance_number = 0,
72862 + .pos_op = PASS_POS_INSERT_AFTER
72863 + };
72864 + struct register_pass_info stackleak_final_pass_info = {
72865 + .pass = &stackleak_final_rtl_opt_pass.pass,
72866 + .reference_pass_name = "final",
72867 + .ref_pass_instance_number = 0,
72868 + .pos_op = PASS_POS_INSERT_BEFORE
72869 + };
72870 +
72871 + if (!plugin_default_version_check(version, &gcc_version)) {
72872 + error(G_("incompatible gcc/plugin versions"));
72873 + return 1;
72874 + }
72875 +
72876 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
72877 +
72878 + for (i = 0; i < argc; ++i) {
72879 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
72880 + if (!argv[i].value) {
72881 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72882 + continue;
72883 + }
72884 + track_frame_size = atoi(argv[i].value);
72885 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
72886 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72887 + continue;
72888 + }
72889 + if (!strcmp(argv[i].key, "initialize-locals")) {
72890 + if (argv[i].value) {
72891 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72892 + continue;
72893 + }
72894 + init_locals = true;
72895 + continue;
72896 + }
72897 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72898 + }
72899 +
72900 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
72901 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
72902 +
72903 + return 0;
72904 +}
72905 diff -urNp linux-3.0.4/usr/gen_init_cpio.c linux-3.0.4/usr/gen_init_cpio.c
72906 --- linux-3.0.4/usr/gen_init_cpio.c 2011-07-21 22:17:23.000000000 -0400
72907 +++ linux-3.0.4/usr/gen_init_cpio.c 2011-08-23 21:47:56.000000000 -0400
72908 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name,
72909 int retval;
72910 int rc = -1;
72911 int namesize;
72912 - int i;
72913 + unsigned int i;
72914
72915 mode |= S_IFREG;
72916
72917 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_
72918 *env_var = *expanded = '\0';
72919 strncat(env_var, start + 2, end - start - 2);
72920 strncat(expanded, new_location, start - new_location);
72921 - strncat(expanded, getenv(env_var), PATH_MAX);
72922 - strncat(expanded, end + 1, PATH_MAX);
72923 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
72924 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
72925 strncpy(new_location, expanded, PATH_MAX);
72926 + new_location[PATH_MAX] = 0;
72927 } else
72928 break;
72929 }
72930 diff -urNp linux-3.0.4/virt/kvm/kvm_main.c linux-3.0.4/virt/kvm/kvm_main.c
72931 --- linux-3.0.4/virt/kvm/kvm_main.c 2011-07-21 22:17:23.000000000 -0400
72932 +++ linux-3.0.4/virt/kvm/kvm_main.c 2011-08-23 21:47:56.000000000 -0400
72933 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
72934
72935 static cpumask_var_t cpus_hardware_enabled;
72936 static int kvm_usage_count = 0;
72937 -static atomic_t hardware_enable_failed;
72938 +static atomic_unchecked_t hardware_enable_failed;
72939
72940 struct kmem_cache *kvm_vcpu_cache;
72941 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
72942 @@ -2176,7 +2176,7 @@ static void hardware_enable_nolock(void
72943
72944 if (r) {
72945 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
72946 - atomic_inc(&hardware_enable_failed);
72947 + atomic_inc_unchecked(&hardware_enable_failed);
72948 printk(KERN_INFO "kvm: enabling virtualization on "
72949 "CPU%d failed\n", cpu);
72950 }
72951 @@ -2230,10 +2230,10 @@ static int hardware_enable_all(void)
72952
72953 kvm_usage_count++;
72954 if (kvm_usage_count == 1) {
72955 - atomic_set(&hardware_enable_failed, 0);
72956 + atomic_set_unchecked(&hardware_enable_failed, 0);
72957 on_each_cpu(hardware_enable_nolock, NULL, 1);
72958
72959 - if (atomic_read(&hardware_enable_failed)) {
72960 + if (atomic_read_unchecked(&hardware_enable_failed)) {
72961 hardware_disable_all_nolock();
72962 r = -EBUSY;
72963 }
72964 @@ -2498,7 +2498,7 @@ static void kvm_sched_out(struct preempt
72965 kvm_arch_vcpu_put(vcpu);
72966 }
72967
72968 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
72969 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
72970 struct module *module)
72971 {
72972 int r;
72973 @@ -2561,7 +2561,7 @@ int kvm_init(void *opaque, unsigned vcpu
72974 if (!vcpu_align)
72975 vcpu_align = __alignof__(struct kvm_vcpu);
72976 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
72977 - 0, NULL);
72978 + SLAB_USERCOPY, NULL);
72979 if (!kvm_vcpu_cache) {
72980 r = -ENOMEM;
72981 goto out_free_3;
72982 @@ -2571,9 +2571,11 @@ int kvm_init(void *opaque, unsigned vcpu
72983 if (r)
72984 goto out_free;
72985
72986 - kvm_chardev_ops.owner = module;
72987 - kvm_vm_fops.owner = module;
72988 - kvm_vcpu_fops.owner = module;
72989 + pax_open_kernel();
72990 + *(void **)&kvm_chardev_ops.owner = module;
72991 + *(void **)&kvm_vm_fops.owner = module;
72992 + *(void **)&kvm_vcpu_fops.owner = module;
72993 + pax_close_kernel();
72994
72995 r = misc_register(&kvm_dev);
72996 if (r) {